// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_INL_H_
#define V8_HEAP_HEAP_INL_H_

#include <cmath>

#include "src/base/platform/platform.h"
#include "src/counters-inl.h"
#include "src/feedback-vector-inl.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/object-stats.h"
#include "src/heap/remembered-set.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/store-buffer.h"
#include "src/isolate.h"
#include "src/list-inl.h"
#include "src/log.h"
#include "src/msan.h"
#include "src/objects-inl.h"
#include "src/objects/scope-info.h"

namespace v8 {
namespace internal {

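// AllocationResult either holds a successfully allocated HeapObject or a
// retry marker. In the retry case the requested space is encoded as a Smi
// in object_, which RetrySpace() decodes.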
AllocationSpace AllocationResult::RetrySpace() {
  DCHECK(IsRetry());
  return static_cast<AllocationSpace>(Smi::cast(object_)->value());
}

HeapObject* AllocationResult::ToObjectChecked() {
  CHECK(!IsRetry());
  return HeapObject::cast(object_);
}

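// The promotion queue is stored at the unused end of the to-space page that
// rear_ points into: entries are written downwards (rear_ is pre-decremented)
// towards the allocation top. When the queue would collide with the
// allocation limit, the head entries are evacuated to a heap-allocated
// emergency stack.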
void PromotionQueue::insert(HeapObject* target, int32_t size,
                            bool was_marked_black) {
  if (emergency_stack_ != nullptr) {
    emergency_stack_->Add(Entry(target, size, was_marked_black));
    return;
  }

  if ((rear_ - 1) < limit_) {
    // RelocateQueueHead() allocates the emergency stack and moves the
    // in-place head entries onto it.
    RelocateQueueHead();
    emergency_stack_->Add(Entry(target, size, was_marked_black));
    return;
  }

  struct Entry* entry = reinterpret_cast<struct Entry*>(--rear_);
  entry->obj_ = target;
  entry->size_ = size;
  entry->was_marked_black_ = was_marked_black;

// Assert no overflow into live objects.
#ifdef DEBUG
  SemiSpace::AssertValidRange(target->GetIsolate()->heap()->new_space()->top(),
                              reinterpret_cast<Address>(rear_));
#endif
}

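// remove() mirrors insert(): once the in-place queue is drained
// (front_ == rear_), entries are popped from the emergency stack instead;
// otherwise front_ walks down towards rear_.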
void PromotionQueue::remove(HeapObject** target, int32_t* size,
                            bool* was_marked_black) {
  DCHECK(!is_empty());
  if (front_ == rear_) {
    Entry e = emergency_stack_->RemoveLast();
    *target = e.obj_;
    *size = e.size_;
    *was_marked_black = e.was_marked_black_;
    return;
  }

  struct Entry* entry = reinterpret_cast<struct Entry*>(--front_);
  *target = entry->obj_;
  *size = entry->size_;
  *was_marked_black = entry->was_marked_black_;

  // Assert no underflow.
  SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
                              reinterpret_cast<Address>(front_));
}

Page* PromotionQueue::GetHeadPage() {
  return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
}

void PromotionQueue::SetNewLimit(Address limit) {
  // If we are already using an emergency stack, the new limit is irrelevant.
  if (emergency_stack_) return;

  // If the limit is not on the head page, it cannot affect the queue.
  if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;

  limit_ = reinterpret_cast<struct Entry*>(limit);

  if (limit_ <= rear_) {
    // The queue still fits below the new limit.
    return;
  }

  RelocateQueueHead();
}

bool PromotionQueue::IsBelowPromotionQueue(Address to_space_top) {
  // If an emergency stack is used, the to-space address cannot interfere
  // with the promotion queue.
  if (emergency_stack_) return true;

  // If the given to-space top pointer and the head of the promotion queue
  // are not on the same page, then the to-space objects are below the
  // promotion queue.
  if (GetHeadPage() != Page::FromAddress(to_space_top)) {
    return true;
  }
  // If the to-space top pointer is at or below the promotion queue head,
  // then the to-space objects are below the promotion queue.
  return reinterpret_cast<struct Entry*>(to_space_top) <= rear_;
}

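// The accessors below are generated from the root, struct-map, internalized-
// string, and symbol lists. As an illustration (assuming the
// (Map, meta_map, MetaMap) entry of ROOT_LIST), ROOT_ACCESSOR expands to:
//   Map* Heap::meta_map() { return Map::cast(roots_[kMetaMapRootIndex]); }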
#define ROOT_ACCESSOR(type, name, camel_name) \
  type* Heap::name() { return type::cast(roots_[k##camel_name##RootIndex]); }
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
  Map* Heap::name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); }
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR

#define STRING_ACCESSOR(name, str) \
  String* Heap::name() { return String::cast(roots_[k##name##RootIndex]); }
INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR

#define SYMBOL_ACCESSOR(name) \
  Symbol* Heap::name() { return Symbol::cast(roots_[k##name##RootIndex]); }
PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

#define SYMBOL_ACCESSOR(name, description) \
  Symbol* Heap::name() { return Symbol::cast(roots_[k##name##RootIndex]); }
PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

#define ROOT_ACCESSOR(type, name, camel_name)                                 \
  void Heap::set_##name(type* value) {                                        \
    /* The deserializer makes use of the fact that these common roots are */  \
    /* never in new space and never on a page that is being compacted.    */  \
    DCHECK(!deserialization_complete() ||                                     \
           RootCanBeWrittenAfterInitialization(k##camel_name##RootIndex));    \
    DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
    roots_[k##camel_name##RootIndex] = value;                                 \
  }
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

PagedSpace* Heap::paged_space(int idx) {
  DCHECK_NE(idx, LO_SPACE);
  DCHECK_NE(idx, NEW_SPACE);
  return static_cast<PagedSpace*>(space_[idx]);
}

Space* Heap::space(int idx) { return space_[idx]; }

Address* Heap::NewSpaceAllocationTopAddress() {
  return new_space_->allocation_top_address();
}

Address* Heap::NewSpaceAllocationLimitAddress() {
  return new_space_->allocation_limit_address();
}

Address* Heap::OldSpaceAllocationTopAddress() {
  return old_space_->allocation_top_address();
}

Address* Heap::OldSpaceAllocationLimitAddress() {
  return old_space_->allocation_limit_address();
}

void Heap::UpdateNewSpaceAllocationCounter() {
  new_space_allocation_counter_ = NewSpaceAllocationCounter();
}

size_t Heap::NewSpaceAllocationCounter() {
  return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
}

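// For UTF-8 input the decoded character count equals the byte length only
// when every character is a single byte, i.e. the data is pure ASCII; hence
// the length comparison below.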
template <>
bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
  // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
  return chars == str.length();
}


template <>
bool inline Heap::IsOneByte(String* str, int chars) {
  return str->IsOneByteRepresentation();
}


AllocationResult Heap::AllocateInternalizedStringFromUtf8(
    Vector<const char> str, int chars, uint32_t hash_field) {
  if (IsOneByte(str, chars)) {
    return AllocateOneByteInternalizedString(Vector<const uint8_t>::cast(str),
                                             hash_field);
  }
  return AllocateInternalizedStringImpl<false>(str, chars, hash_field);
}


template <typename T>
AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
                                                      uint32_t hash_field) {
  if (IsOneByte(t, chars)) {
    return AllocateInternalizedStringImpl<true>(t, chars, hash_field);
  }
  return AllocateInternalizedStringImpl<false>(t, chars, hash_field);
}


AllocationResult Heap::AllocateOneByteInternalizedString(
    Vector<const uint8_t> str, uint32_t hash_field) {
  CHECK_GE(String::kMaxLength, str.length());
  // The canonical empty_string is the only zero-length string we allow.
  DCHECK_IMPLIES(str.length() == 0, roots_[kempty_stringRootIndex] == nullptr);
  // Compute map and object size.
  Map* map = one_byte_internalized_string_map();
  int size = SeqOneByteString::SizeFor(str.length());

  // Allocate string.
  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }

  // String maps are all immortal immovable objects.
  result->set_map_no_write_barrier(map);
  // Set length and hash fields of the allocated string.
  String* answer = String::cast(result);
  answer->set_length(str.length());
  answer->set_hash_field(hash_field);

  DCHECK_EQ(size, answer->Size());

  // Fill in the characters.
  MemCopy(answer->address() + SeqOneByteString::kHeaderSize, str.start(),
          str.length());

  return answer;
}


AllocationResult Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
                                                         uint32_t hash_field) {
  CHECK_GE(String::kMaxLength, str.length());
  DCHECK_NE(0, str.length());  // Use Heap::empty_string() instead.
  // Compute map and object size.
  Map* map = internalized_string_map();
  int size = SeqTwoByteString::SizeFor(str.length());

  // Allocate string.
  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }

  result->set_map(map);
  // Set length and hash fields of the allocated string.
  String* answer = String::cast(result);
  answer->set_length(str.length());
  answer->set_hash_field(hash_field);

  DCHECK_EQ(size, answer->Size());

  // Fill in the characters.
  MemCopy(answer->address() + SeqTwoByteString::kHeaderSize, str.start(),
          str.length() * kUC16Size);

  return answer;
}

AllocationResult Heap::CopyFixedArray(FixedArray* src) {
  if (src->length() == 0) return src;
  return CopyFixedArrayWithMap(src, src->map());
}


AllocationResult Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
  if (src->length() == 0) return src;
  return CopyFixedDoubleArrayWithMap(src, src->map());
}

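// AllocateRaw dispatches to the concrete space. New-space requests larger
// than kMaxRegularHeapObjectSize are redirected to the large-object space,
// and code objects that do not fit a code-space area are allocated there as
// well (executable). Callers must inspect the result, e.g.:
//   HeapObject* obj = nullptr;
//   AllocationResult r = AllocateRaw(size, OLD_SPACE);
//   if (!r.To(&obj)) return r;  // Retry in the encoded space.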
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
                                   AllocationAlignment alignment) {
  DCHECK(AllowHandleAllocation::IsAllowed());
  DCHECK(AllowHeapAllocation::IsAllowed());
  DCHECK(gc_state_ == NOT_IN_GC);
#ifdef DEBUG
  if (FLAG_gc_interval >= 0 && !always_allocate() &&
      Heap::allocation_timeout_-- <= 0) {
    return AllocationResult::Retry(space);
  }
  isolate_->counters()->objs_since_last_full()->Increment();
  isolate_->counters()->objs_since_last_young()->Increment();
#endif

  bool large_object = size_in_bytes > kMaxRegularHeapObjectSize;
  HeapObject* object = nullptr;
  AllocationResult allocation;
  if (NEW_SPACE == space) {
    if (large_object) {
      space = LO_SPACE;
    } else {
      allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
      if (allocation.To(&object)) {
        OnAllocationEvent(object, size_in_bytes);
      }
      return allocation;
    }
  }

  // Here we only allocate in the old generation.
  if (OLD_SPACE == space) {
    if (large_object) {
      allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
    } else {
      allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
    }
  } else if (CODE_SPACE == space) {
    if (size_in_bytes <= code_space()->AreaSize()) {
      allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
    } else {
      allocation = lo_space_->AllocateRaw(size_in_bytes, EXECUTABLE);
    }
  } else if (LO_SPACE == space) {
    DCHECK(large_object);
    allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
  } else if (MAP_SPACE == space) {
    allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
  } else {
    // NEW_SPACE is not allowed here.
    UNREACHABLE();
  }
  if (allocation.To(&object)) {
    OnAllocationEvent(object, size_in_bytes);
  }

  return allocation;
}

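// Called for every successful allocation: notifies the heap profiler and,
// under --verify-predictable, advances synthetic time and folds the
// allocation into a running hash so that independent runs can be compared
// allocation by allocation.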
void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
  HeapProfiler* profiler = isolate_->heap_profiler();
  if (profiler->is_tracking_allocations()) {
    profiler->AllocationEvent(object->address(), size_in_bytes);
  }

  if (FLAG_verify_predictable) {
    ++allocations_count_;
    // Advance synthetic time by making a time request.
    MonotonicallyIncreasingTimeInMs();

    UpdateAllocationsHash(object);
    UpdateAllocationsHash(size_in_bytes);

    if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
      PrintAlloctionsHash();
    }
  }

  if (FLAG_trace_allocation_stack_interval > 0) {
    if (!FLAG_verify_predictable) ++allocations_count_;
    if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
      isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
    }
  }
}


void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
                       int size_in_bytes) {
  HeapProfiler* heap_profiler = isolate_->heap_profiler();
  if (heap_profiler->is_tracking_object_moves()) {
    heap_profiler->ObjectMoveEvent(source->address(), target->address(),
                                   size_in_bytes);
  }
  if (target->IsSharedFunctionInfo()) {
    LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source->address(),
                                                         target->address()));
  }

  if (FLAG_verify_predictable) {
    ++allocations_count_;
    // Advance synthetic time by making a time request.
    MonotonicallyIncreasingTimeInMs();

    UpdateAllocationsHash(source);
    UpdateAllocationsHash(target);
    UpdateAllocationsHash(size_in_bytes);

    if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
      PrintAlloctionsHash();
    }
  }
}


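// An object's contribution to the allocations hash is its offset within the
// page combined with the identity of its owning space; the STATIC_ASSERT
// below guarantees that both components fit into 32 bits.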
void Heap::UpdateAllocationsHash(HeapObject* object) {
  Address object_address = object->address();
  MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
  AllocationSpace allocation_space = memory_chunk->owner()->identity();

  STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
  uint32_t value =
      static_cast<uint32_t>(object_address - memory_chunk->address()) |
      (static_cast<uint32_t>(allocation_space) << kPageSizeBits);

  UpdateAllocationsHash(value);
}


void Heap::UpdateAllocationsHash(uint32_t value) {
  uint16_t c1 = static_cast<uint16_t>(value);
  uint16_t c2 = static_cast<uint16_t>(value >> 16);
  raw_allocations_hash_ =
      StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
  raw_allocations_hash_ =
      StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
}


void Heap::RegisterExternalString(String* string) {
  external_string_table_.AddString(string);
}


void Heap::FinalizeExternalString(String* string) {
  DCHECK(string->IsExternalString());
  v8::String::ExternalStringResourceBase** resource_addr =
      reinterpret_cast<v8::String::ExternalStringResourceBase**>(
          reinterpret_cast<byte*>(string) + ExternalString::kResourceOffset -
          kHeapObjectTag);

  // Dispose of the C++ object if it has not already been disposed.
  if (*resource_addr != nullptr) {
    (*resource_addr)->Dispose();
    *resource_addr = nullptr;
  }
}

Address Heap::NewSpaceTop() { return new_space_->top(); }

bool Heap::DeoptMaybeTenuredAllocationSites() {
  return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
}

bool Heap::InNewSpace(Object* object) {
  // Inlined check from NewSpace::Contains.
  bool result =
      object->IsHeapObject() &&
      Page::FromAddress(HeapObject::cast(object)->address())->InNewSpace();
  DCHECK(!result ||                 // Either not in new space
         gc_state_ != NOT_IN_GC ||  // ... or in the middle of GC
         InToSpace(object));        // ... or in to-space (where we allocate).
  return result;
}

bool Heap::InFromSpace(Object* object) {
  return object->IsHeapObject() &&
         MemoryChunk::FromAddress(HeapObject::cast(object)->address())
             ->IsFlagSet(Page::IN_FROM_SPACE);
}


bool Heap::InToSpace(Object* object) {
  return object->IsHeapObject() &&
         MemoryChunk::FromAddress(HeapObject::cast(object)->address())
             ->IsFlagSet(Page::IN_TO_SPACE);
}

bool Heap::InOldSpace(Object* object) { return old_space_->Contains(object); }

bool Heap::InNewSpaceSlow(Address address) {
  return new_space_->ContainsSlow(address);
}

bool Heap::InOldSpaceSlow(Address address) {
  return old_space_->ContainsSlow(address);
}

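// An object is promoted if it already survived a scavenge, i.e. it lies
// below the new-space age mark. The page flag is a cheap whole-page check;
// the exact address comparison is only needed on the page containing the
// mark itself.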
bool Heap::ShouldBePromoted(Address old_address, int object_size) {
  Page* page = Page::FromAddress(old_address);
  Address age_mark = new_space_->age_mark();
  return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
         (!page->ContainsLimit(age_mark) || old_address < age_mark);
}

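// Write-barrier slow path: records an old-to-new pointer in the store
// buffer. Nothing is recorded when the stored value is not in new space,
// when the host is a Smi, or when the host itself is in new space.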
void Heap::RecordWrite(Object* object, int offset, Object* o) {
  if (!InNewSpace(o) || !object->IsHeapObject() || InNewSpace(object)) {
    return;
  }
  store_buffer()->InsertEntry(HeapObject::cast(object)->address() + offset);
}

void Heap::RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value) {
  if (InNewSpace(value)) {
    RecordWriteIntoCodeSlow(host, rinfo, value);
  }
}

void Heap::RecordFixedArrayElements(FixedArray* array, int offset, int length) {
  if (InNewSpace(array)) return;
  for (int i = 0; i < length; i++) {
    if (!InNewSpace(array->get(offset + i))) continue;
    store_buffer()->InsertEntry(
        reinterpret_cast<Address>(array->RawFieldOfElementAt(offset + i)));
  }
}

Address* Heap::store_buffer_top_address() {
  return store_buffer()->top_address();
}

bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
  // Object migration is governed by the following rules:
  //
  // 1) Objects in new-space can be migrated to the old space or stay in
  //    new-space.
  // 2) Objects in old-space stay in the same space when migrating.
  // 3) Fillers (two or more words) can migrate due to left-trimming of
  //    fixed arrays in new-space or old space.
  // 4) Fillers (one word) can never migrate; they are skipped by
  //    incremental marking explicitly to prevent invalid patterns.
  //
  // Since this function is used for debugging only, we do not place
  // asserts here, but check everything explicitly.
  if (obj->map() == one_pointer_filler_map()) return false;
  InstanceType type = obj->map()->instance_type();
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  AllocationSpace src = chunk->owner()->identity();
  switch (src) {
    case NEW_SPACE:
      return dst == src || dst == OLD_SPACE;
    case OLD_SPACE:
      return dst == src &&
             (dst == OLD_SPACE || obj->IsFiller() || obj->IsExternalString());
    case CODE_SPACE:
      return dst == src && type == CODE_TYPE;
    case MAP_SPACE:
    case LO_SPACE:
      return false;
  }
  UNREACHABLE();
  return false;
}

void Heap::CopyBlock(Address dst, Address src, int byte_size) {
  CopyWords(reinterpret_cast<Object**>(dst), reinterpret_cast<Object**>(src),
            static_cast<size_t>(byte_size / kPointerSize));
}

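// An AllocationMemento, if present, sits directly behind its object. The
// lookup below therefore peeks at the word following the object and
// validates it in stages: same page, map word equal to the
// allocation-memento map, object above the age mark, and (for runtime
// callers) the memento not overlapping the current allocation top.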
template <Heap::FindMementoMode mode>
AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
  Address object_address = object->address();
  Address memento_address = object_address + object->Size();
  Address last_memento_word_address = memento_address + kPointerSize;
  // If the memento would be on another page, bail out immediately.
  if (!Page::OnSamePage(object_address, last_memento_word_address)) {
    return nullptr;
  }
  HeapObject* candidate = HeapObject::FromAddress(memento_address);
  Map* candidate_map = candidate->map();
  // This fast check may peek at an uninitialized word. However, the slow check
  // below (memento_address == top) ensures that this is safe. Mark the word as
  // initialized to silence MemorySanitizer warnings.
  MSAN_MEMORY_IS_INITIALIZED(&candidate_map, sizeof(candidate_map));
  if (candidate_map != allocation_memento_map()) {
    return nullptr;
  }

  // Bail out if the memento is below the age mark, which can happen when
  // mementos survived because a page got moved within new space.
  Page* object_page = Page::FromAddress(object_address);
  if (object_page->IsFlagSet(Page::NEW_SPACE_BELOW_AGE_MARK)) {
    Address age_mark =
        reinterpret_cast<SemiSpace*>(object_page->owner())->age_mark();
    if (!object_page->Contains(age_mark)) {
      return nullptr;
    }
    // Do an exact check in the case where the age mark is on the same page.
    if (object_address < age_mark) {
      return nullptr;
    }
  }

  AllocationMemento* memento_candidate = AllocationMemento::cast(candidate);

  // Depending on what the memento is used for, we might need to perform
  // additional checks.
  Address top;
  switch (mode) {
    case Heap::kForGC:
      return memento_candidate;
    case Heap::kForRuntime:
      if (memento_candidate == nullptr) return nullptr;
      // Either the object is the last object in the new space, or there is
      // another object of at least word size (the header map word) following
      // it, so it suffices to compare the memento address and top here.
      top = NewSpaceTop();
      DCHECK(memento_address == top ||
             memento_address + HeapObject::kHeaderSize <= top ||
             !Page::OnSamePage(memento_address, top - 1));
      if ((memento_address != top) && memento_candidate->IsValid()) {
        return memento_candidate;
      }
      return nullptr;
    default:
      UNREACHABLE();
  }
  UNREACHABLE();
  return nullptr;
}

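// Records pretenuring feedback for the allocation site referenced by the
// object's memento, if any. In kGlobal mode (scavenger) the site itself is
// updated; in kCached mode (parallel evacuation) only an untyped key/count
// pair is stored and all site checks are deferred until merging.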
template <Heap::UpdateAllocationSiteMode mode>
void Heap::UpdateAllocationSite(HeapObject* object,
                                base::HashMap* pretenuring_feedback) {
  DCHECK(InFromSpace(object) ||
         (InToSpace(object) &&
          Page::FromAddress(object->address())
              ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) ||
         (!InNewSpace(object) &&
          Page::FromAddress(object->address())
              ->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)));
  if (!FLAG_allocation_site_pretenuring ||
      !AllocationSite::CanTrack(object->map()->instance_type()))
    return;
  AllocationMemento* memento_candidate = FindAllocationMemento<kForGC>(object);
  if (memento_candidate == nullptr) return;

  if (mode == kGlobal) {
    DCHECK_EQ(pretenuring_feedback, global_pretenuring_feedback_);
    // Global pretenuring feedback is only entered from the scavenger, where
    // we are allowed to actually touch the allocation site.
    if (!memento_candidate->IsValid()) return;
    AllocationSite* site = memento_candidate->GetAllocationSite();
    DCHECK(!site->IsZombie());
    // For inserting in the global pretenuring storage we need to first
    // increment the memento found count on the allocation site.
    if (site->IncrementMementoFoundCount()) {
      global_pretenuring_feedback_->LookupOrInsert(site,
                                                   ObjectHash(site->address()));
    }
  } else {
    DCHECK_EQ(mode, kCached);
    DCHECK_NE(pretenuring_feedback, global_pretenuring_feedback_);
    // Cached feedback is used in the parallel case. We may not dereference
    // the allocation site here and instead postpone all checks until the
    // data is actually merged.
    Address key = memento_candidate->GetAllocationSiteUnchecked();
    base::HashMap::Entry* e =
        pretenuring_feedback->LookupOrInsert(key, ObjectHash(key));
    DCHECK(e != nullptr);
    (*bit_cast<intptr_t*>(&e->value))++;
  }
}


void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
  global_pretenuring_feedback_->Remove(
      site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
}

bool Heap::CollectGarbage(AllocationSpace space,
                          GarbageCollectionReason gc_reason,
                          const v8::GCCallbackFlags callbackFlags) {
  const char* collector_reason = nullptr;
  GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
  return CollectGarbage(collector, gc_reason, collector_reason, callbackFlags);
}

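// Recovers the Isolate from a Heap pointer. The expression measures the
// offset of the heap_ field inside Isolate by pretending an Isolate lives at
// address 16 and taking the address its heap() accessor would return;
// nothing is dereferenced, and the resulting offset is subtracted from this.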
Isolate* Heap::isolate() {
  return reinterpret_cast<Isolate*>(
      reinterpret_cast<intptr_t>(this) -
      reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
}

void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() {
  old_space_strings_.AddAll(new_space_strings_);
  new_space_strings_.Clear();
}

void Heap::ExternalStringTable::AddString(String* string) {
  DCHECK(string->IsExternalString());
  if (heap_->InNewSpace(string)) {
    new_space_strings_.Add(string);
  } else {
    old_space_strings_.Add(string);
  }
}

void Heap::ExternalStringTable::IterateNewSpaceStrings(ObjectVisitor* v) {
  if (!new_space_strings_.is_empty()) {
    Object** start = &new_space_strings_[0];
    v->VisitPointers(start, start + new_space_strings_.length());
  }
}

void Heap::ExternalStringTable::IterateAll(ObjectVisitor* v) {
  IterateNewSpaceStrings(v);
  if (!old_space_strings_.is_empty()) {
    Object** start = &old_space_strings_[0];
    v->VisitPointers(start, start + old_space_strings_.length());
  }
}


// Verify() is inline to avoid ifdef-s around its calls in release
// mode.
void Heap::ExternalStringTable::Verify() {
#ifdef DEBUG
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    Object* obj = Object::cast(new_space_strings_[i]);
    DCHECK(heap_->InNewSpace(obj));
    DCHECK(!obj->IsTheHole(heap_->isolate()));
  }
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    Object* obj = Object::cast(old_space_strings_[i]);
    DCHECK(!heap_->InNewSpace(obj));
    DCHECK(!obj->IsTheHole(heap_->isolate()));
  }
#endif
}


void Heap::ExternalStringTable::AddOldString(String* string) {
  DCHECK(string->IsExternalString());
  DCHECK(!heap_->InNewSpace(string));
  old_space_strings_.Add(string);
}


void Heap::ExternalStringTable::ShrinkNewStrings(int position) {
  new_space_strings_.Rewind(position);
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}

void Heap::ClearInstanceofCache() { set_instanceof_cache_function(Smi::kZero); }

Oddball* Heap::ToBoolean(bool condition) {
  return condition ? true_value() : false_value();
}


void Heap::CompletelyClearInstanceofCache() {
  set_instanceof_cache_map(Smi::kZero);
  set_instanceof_cache_function(Smi::kZero);
}


uint32_t Heap::HashSeed() {
  uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
  DCHECK(FLAG_randomize_hashes || seed == 0);
  return seed;
}


int Heap::NextScriptId() {
  int last_id = last_script_id()->value();
  if (last_id == Smi::kMaxValue) {
    last_id = 1;
  } else {
    last_id++;
  }
  set_last_script_id(Smi::FromInt(last_id));
  return last_id;
}

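// The setters below record code offsets used by the deoptimizer and the
// interpreter entry trampoline. Each offset may be set only once; the
// DCHECKs verify that the corresponding root still holds its initial
// Smi::kZero.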
void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
  DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::kZero);
  set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
  DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero);
  set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
  DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero);
  set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
  DCHECK(getter_stub_deopt_pc_offset() == Smi::kZero);
  set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
  DCHECK(setter_stub_deopt_pc_offset() == Smi::kZero);
  set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
  DCHECK(interpreter_entry_return_pc_offset() == Smi::kZero);
  set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
}

int Heap::GetNextTemplateSerialNumber() {
  int next_serial_number = next_template_serial_number()->value() + 1;
  set_next_template_serial_number(Smi::FromInt(next_serial_number));
  return next_serial_number;
}

void Heap::SetSerializedTemplates(FixedArray* templates) {
  DCHECK_EQ(empty_fixed_array(), serialized_templates());
  DCHECK(isolate()->serializer_enabled());
  set_serialized_templates(templates);
}

void Heap::SetSerializedGlobalProxySizes(FixedArray* sizes) {
  DCHECK_EQ(empty_fixed_array(), serialized_global_proxy_sizes());
  DCHECK(isolate()->serializer_enabled());
  set_serialized_global_proxy_sizes(sizes);
}

void Heap::CreateObjectStats() {
  if (V8_LIKELY(FLAG_gc_stats == 0)) return;
  if (!live_object_stats_) {
    live_object_stats_ = new ObjectStats(this);
  }
  if (!dead_object_stats_) {
    dead_object_stats_ = new ObjectStats(this);
  }
}

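// While an AlwaysAllocateScope is active (counter != 0),
// Heap::always_allocate() returns true; see e.g. the debug
// allocation-timeout check in AllocateRaw above, which is skipped in that
// case.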
AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
    : heap_(isolate->heap()) {
  heap_->always_allocate_scope_count_.Increment(1);
}


AlwaysAllocateScope::~AlwaysAllocateScope() {
  heap_->always_allocate_scope_count_.Increment(-1);
}


void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
  for (Object** current = start; current < end; current++) {
    if ((*current)->IsHeapObject()) {
      HeapObject* object = HeapObject::cast(*current);
      CHECK(object->GetIsolate()->heap()->Contains(object));
      CHECK(object->map()->IsMap());
    } else {
      CHECK((*current)->IsSmi());
    }
  }
}


void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
  for (Object** current = start; current < end; current++) {
    CHECK((*current)->IsSmi());
  }
}
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_INL_H_