// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_INL_H_
#define V8_HEAP_HEAP_INL_H_

#include <cmath>

// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap other than src/heap/heap.h and its
// write barrier here!
#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/platform/platform.h"
#include "src/common/assert-scope.h"
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
#include "src/heap/third-party/heap-api.h"
#include "src/objects/feedback-vector.h"

// TODO(gc): There is one more include to remove in order to no longer
// leak heap internals to users of this interface!
#include "src/execution/isolate-data.h"
#include "src/execution/isolate.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/large-spaces.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/new-spaces-inl.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks-inl.h"
#include "src/objects/cell-inl.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/feedback-cell-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/property-cell.h"
#include "src/objects/scope-info.h"
#include "src/objects/script-inl.h"
#include "src/objects/slots-inl.h"
#include "src/objects/struct-inl.h"
#include "src/profiler/heap-profiler.h"
#include "src/sanitizer/msan.h"
#include "src/strings/string-hasher.h"
#include "src/zone/zone-list-inl.h"

namespace v8 {
namespace internal {

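// Note: on retry, AllocationResult stores the allocation space to retry in as
// a Smi in object_. RetrySpace() decodes that space; the To*() accessors below
// require a successful (non-retry) allocation.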
AllocationSpace AllocationResult::RetrySpace() {
  DCHECK(IsRetry());
  return static_cast<AllocationSpace>(Smi::ToInt(object_));
}

HeapObject AllocationResult::ToObjectChecked() {
  CHECK(!IsRetry());
  return HeapObject::cast(object_);
}

HeapObject AllocationResult::ToObject() {
  DCHECK(!IsRetry());
  return HeapObject::cast(object_);
}

Address AllocationResult::ToAddress() {
  DCHECK(!IsRetry());
  return HeapObject::cast(object_).address();
}

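// The Heap is embedded directly in its Isolate, so the owning Isolate can be
// recovered by subtracting the byte offset of the Heap field. That offset is
// obtained by calling heap() on a dummy Isolate pointer (16) and subtracting
// the dummy base again.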
Isolate* Heap::isolate() {
  return reinterpret_cast<Isolate*>(
      reinterpret_cast<intptr_t>(this) -
      reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
}

int64_t Heap::external_memory() { return external_memory_.total(); }

int64_t Heap::update_external_memory(int64_t delta) {
  return external_memory_.Update(delta);
}

RootsTable& Heap::roots_table() { return isolate()->roots_table(); }

#define ROOT_ACCESSOR(Type, name, CamelName)                           \
  Type Heap::name() {                                                  \
    return Type::cast(Object(roots_table()[RootIndex::k##CamelName])); \
  }
MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

#define ROOT_ACCESSOR(type, name, CamelName)                                   \
  void Heap::set_##name(type value) {                                          \
    /* The deserializer makes use of the fact that these common roots are */   \
    /* never in new space and never on a page that is being compacted. */      \
    DCHECK_IMPLIES(deserialization_complete(),                                 \
                   !RootsTable::IsImmortalImmovable(RootIndex::k##CamelName)); \
    DCHECK_IMPLIES(RootsTable::IsImmortalImmovable(RootIndex::k##CamelName),   \
                   IsImmovable(HeapObject::cast(value)));                      \
    roots_table()[RootIndex::k##CamelName] = value.ptr();                      \
  }
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

void Heap::SetRootMaterializedObjects(FixedArray objects) {
  roots_table()[RootIndex::kMaterializedObjects] = objects.ptr();
}

void Heap::SetRootScriptList(Object value) {
  roots_table()[RootIndex::kScriptList] = value.ptr();
}

void Heap::SetMessageListeners(TemplateList value) {
  roots_table()[RootIndex::kMessageListeners] = value.ptr();
}

void Heap::SetPendingOptimizeForTestBytecode(Object hash_table) {
  DCHECK(hash_table.IsObjectHashTable() || hash_table.IsUndefined(isolate()));
  roots_table()[RootIndex::kPendingOptimizeForTestBytecode] = hash_table.ptr();
}

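// paged_space() must not be used for the new space or any large-object space;
// those are not backed by a PagedSpace.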
PagedSpace* Heap::paged_space(int idx) {
  DCHECK_NE(idx, LO_SPACE);
  DCHECK_NE(idx, NEW_SPACE);
  DCHECK_NE(idx, CODE_LO_SPACE);
  DCHECK_NE(idx, NEW_LO_SPACE);
  return static_cast<PagedSpace*>(space_[idx]);
}

Space* Heap::space(int idx) { return space_[idx]; }

Address* Heap::NewSpaceAllocationTopAddress() {
  return new_space_->allocation_top_address();
}

Address* Heap::NewSpaceAllocationLimitAddress() {
  return new_space_->allocation_limit_address();
}

Address* Heap::OldSpaceAllocationTopAddress() {
  return old_space_->allocation_top_address();
}

Address* Heap::OldSpaceAllocationLimitAddress() {
  return old_space_->allocation_limit_address();
}

void Heap::UpdateNewSpaceAllocationCounter() {
  new_space_allocation_counter_ = NewSpaceAllocationCounter();
}

size_t Heap::NewSpaceAllocationCounter() {
  return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
}

inline const base::AddressRegion& Heap::code_range() {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
  return tp_heap_->GetCodeRange();
#else
  return memory_allocator_->code_range();
#endif
}

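// AllocateRaw dispatches to the space that matches the requested
// AllocationType, falling back to the corresponding large-object space when
// size_in_bytes exceeds the regular heap object size limit.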
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
                                   AllocationOrigin origin,
                                   AllocationAlignment alignment) {
  DCHECK(AllowHandleAllocation::IsAllowed());
  DCHECK(AllowHeapAllocation::IsAllowed());
  DCHECK(AllowGarbageCollection::IsAllowed());
  DCHECK_IMPLIES(type == AllocationType::kCode || type == AllocationType::kMap,
                 alignment == AllocationAlignment::kWordAligned);
  DCHECK_EQ(gc_state(), NOT_IN_GC);
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
  if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
    if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
      return AllocationResult::Retry();
    }
  }
#endif
#ifdef DEBUG
  IncrementObjectCounters();
#endif

  size_t large_object_threshold = MaxRegularHeapObjectSize(type);
  bool large_object =
      static_cast<size_t>(size_in_bytes) > large_object_threshold;

  HeapObject object;
  AllocationResult allocation;

  if (FLAG_single_generation && type == AllocationType::kYoung) {
    type = AllocationType::kOld;
  }

  if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
    allocation = tp_heap_->Allocate(size_in_bytes, type, alignment);
  } else {
    if (AllocationType::kYoung == type) {
      if (large_object) {
        if (FLAG_young_generation_large_objects) {
          allocation = new_lo_space_->AllocateRaw(size_in_bytes);
        } else {
          // If young generation large objects are disabled, we have to tenure
          // the allocation and violate the requested allocation type. This
          // could be dangerous. We may want to remove
          // FLAG_young_generation_large_objects and avoid patching.
          allocation = lo_space_->AllocateRaw(size_in_bytes);
        }
      } else {
        allocation = new_space_->AllocateRaw(size_in_bytes, alignment, origin);
      }
    } else if (AllocationType::kOld == type) {
      if (large_object) {
        allocation = lo_space_->AllocateRaw(size_in_bytes);
      } else {
        allocation = old_space_->AllocateRaw(size_in_bytes, alignment, origin);
      }
    } else if (AllocationType::kCode == type) {
      DCHECK(AllowCodeAllocation::IsAllowed());
      if (large_object) {
        allocation = code_lo_space_->AllocateRaw(size_in_bytes);
      } else {
        allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
      }
    } else if (AllocationType::kMap == type) {
      allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
    } else if (AllocationType::kReadOnly == type) {
      DCHECK(!large_object);
      DCHECK(CanAllocateInReadOnlySpace());
      DCHECK_EQ(AllocationOrigin::kRuntime, origin);
      allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
    } else {
      UNREACHABLE();
    }
  }

  if (allocation.To(&object)) {
    if (AllocationType::kCode == type && !V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
      // Unprotect the memory chunk of the object if it was not unprotected
      // already.
      UnprotectAndRegisterMemoryChunk(object);
      ZapCodeObject(object.address(), size_in_bytes);
      if (!large_object) {
        MemoryChunk::FromHeapObject(object)
            ->GetCodeObjectRegistry()
            ->RegisterNewlyAllocatedCodeObject(object.address());
      }
    }

#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
    if (AllocationType::kReadOnly != type) {
      DCHECK_TAG_ALIGNED(object.address());
      Page::FromHeapObject(object)->object_start_bitmap()->SetBit(
          object.address());
    }
#endif

    OnAllocationEvent(object, size_in_bytes);
  }

  return allocation;
}

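// Like AllocateRaw, but inlines the new-space bump-pointer fast path here; if
// the fast path cannot be taken, allocation falls through to the slow path
// selected by the retry mode.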
template <Heap::AllocationRetryMode mode>
HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
                                 AllocationOrigin origin,
                                 AllocationAlignment alignment) {
  DCHECK(AllowHandleAllocation::IsAllowed());
  DCHECK(AllowHeapAllocation::IsAllowed());
  DCHECK(AllowGarbageCollection::IsAllowed());
  DCHECK_EQ(gc_state(), NOT_IN_GC);
  Heap* heap = isolate()->heap();
  if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
      allocation == AllocationType::kYoung &&
      alignment == AllocationAlignment::kWordAligned &&
      size <= MaxRegularHeapObjectSize(allocation)) {
    Address* top = heap->NewSpaceAllocationTopAddress();
    Address* limit = heap->NewSpaceAllocationLimitAddress();
    if ((*limit - *top >= static_cast<unsigned>(size)) &&
        V8_LIKELY(!FLAG_single_generation && FLAG_inline_new &&
                  FLAG_gc_interval == 0)) {
      DCHECK(IsAligned(size, kTaggedSize));
      HeapObject obj = HeapObject::FromAddress(*top);
      *top += size;
      heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
      MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size);
      return obj;
    }
  }
  switch (mode) {
    case kLightRetry:
      return AllocateRawWithLightRetrySlowPath(size, allocation, origin,
                                               alignment);
    case kRetryOrFail:
      return AllocateRawWithRetryOrFailSlowPath(size, allocation, origin,
                                                alignment);
  }
  UNREACHABLE();
}

Address Heap::DeserializerAllocate(AllocationType type, int size_in_bytes) {
  if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
    AllocationResult allocation = tp_heap_->Allocate(
        size_in_bytes, type, AllocationAlignment::kDoubleAligned);
    return allocation.ToObjectChecked().ptr();
  } else {
    UNIMPLEMENTED();
  }
}

void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) {
  for (auto& tracker : allocation_trackers_) {
    tracker->AllocationEvent(object.address(), size_in_bytes);
  }

  if (FLAG_verify_predictable) {
    ++allocations_count_;
    // Advance synthetic time by making a time request.
    MonotonicallyIncreasingTimeInMs();

    UpdateAllocationsHash(object);
    UpdateAllocationsHash(size_in_bytes);

    if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
      PrintAllocationsHash();
    }
  } else if (FLAG_fuzzer_gc_analysis) {
    ++allocations_count_;
  } else if (FLAG_trace_allocation_stack_interval > 0) {
    ++allocations_count_;
    if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
      isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
    }
  }
}

bool Heap::CanAllocateInReadOnlySpace() {
  return read_only_space()->writable();
}

void Heap::UpdateAllocationsHash(HeapObject object) {
  Address object_address = object.address();
  MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
  AllocationSpace allocation_space = memory_chunk->owner_identity();

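  // Pack the object's offset within its chunk into the low kPageSizeBits bits
  // and the owning space's identity above them; the STATIC_ASSERT below checks
  // that the combination fits into the 32-bit hash input.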
  STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
  uint32_t value =
      static_cast<uint32_t>(object_address - memory_chunk->address()) |
      (static_cast<uint32_t>(allocation_space) << kPageSizeBits);

  UpdateAllocationsHash(value);
}

void Heap::UpdateAllocationsHash(uint32_t value) {
  uint16_t c1 = static_cast<uint16_t>(value);
  uint16_t c2 = static_cast<uint16_t>(value >> 16);
  raw_allocations_hash_ =
      StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
  raw_allocations_hash_ =
      StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
}

void Heap::RegisterExternalString(String string) {
  DCHECK(string.IsExternalString());
  DCHECK(!string.IsThinString());
  external_string_table_.AddString(string);
}

void Heap::FinalizeExternalString(String string) {
  DCHECK(string.IsExternalString());
  Page* page = Page::FromHeapObject(string);
  ExternalString ext_string = ExternalString::cast(string);

  page->DecrementExternalBackingStoreBytes(
      ExternalBackingStoreType::kExternalString,
      ext_string.ExternalPayloadSize());

  ext_string.DisposeResource(isolate());
}

Address Heap::NewSpaceTop() { return new_space_->top(); }

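// Young-generation and semispace predicates. With a third-party heap there is
// no young generation, so InYoungGeneration() is unconditionally false in that
// configuration.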
bool Heap::InYoungGeneration(Object object) {
  DCHECK(!HasWeakHeapObjectTag(object));
  return object.IsHeapObject() && InYoungGeneration(HeapObject::cast(object));
}

// static
bool Heap::InYoungGeneration(MaybeObject object) {
  HeapObject heap_object;
  return object->GetHeapObject(&heap_object) && InYoungGeneration(heap_object);
}

// static
bool Heap::InYoungGeneration(HeapObject heap_object) {
  if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return false;
  bool result =
      BasicMemoryChunk::FromHeapObject(heap_object)->InYoungGeneration();
#ifdef DEBUG
  // If in the young generation, then check we're either not in the middle of
  // GC or the object is in to-space.
  if (result) {
    // If the object is in the young generation, then it's not in RO_SPACE so
    // this is safe.
    Heap* heap = Heap::FromWritableHeapObject(heap_object);
    DCHECK_IMPLIES(heap->gc_state() == NOT_IN_GC, InToPage(heap_object));
  }
#endif
  return result;
}

// static
bool Heap::InFromPage(Object object) {
  DCHECK(!HasWeakHeapObjectTag(object));
  return object.IsHeapObject() && InFromPage(HeapObject::cast(object));
}

// static
bool Heap::InFromPage(MaybeObject object) {
  HeapObject heap_object;
  return object->GetHeapObject(&heap_object) && InFromPage(heap_object);
}

// static
bool Heap::InFromPage(HeapObject heap_object) {
  return BasicMemoryChunk::FromHeapObject(heap_object)->IsFromPage();
}

// static
bool Heap::InToPage(Object object) {
  DCHECK(!HasWeakHeapObjectTag(object));
  return object.IsHeapObject() && InToPage(HeapObject::cast(object));
}

// static
bool Heap::InToPage(MaybeObject object) {
  HeapObject heap_object;
  return object->GetHeapObject(&heap_object) && InToPage(heap_object);
}

// static
bool Heap::InToPage(HeapObject heap_object) {
  return BasicMemoryChunk::FromHeapObject(heap_object)->IsToPage();
}

bool Heap::InOldSpace(Object object) { return old_space_->Contains(object); }

// static
Heap* Heap::FromWritableHeapObject(HeapObject obj) {
  if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
    return Heap::GetIsolateFromWritableObject(obj)->heap();
  }
  BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(obj);
  // RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
  // find a heap. The exception is during bootstrapping, when the ReadOnlySpace
  // is still writable, so explicitly allow this case.
  SLOW_DCHECK(chunk->IsWritable());
  Heap* heap = chunk->heap();
  SLOW_DCHECK(heap != nullptr);
  return heap;
}

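// An object below the new-space age mark has already survived a scavenge and
// should therefore be promoted rather than copied within new space.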
bool Heap::ShouldBePromoted(Address old_address) {
  Page* page = Page::FromAddress(old_address);
  Address age_mark = new_space_->age_mark();
  return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
         (!page->ContainsLimit(age_mark) || old_address < age_mark);
}

void Heap::CopyBlock(Address dst, Address src, int byte_size) {
  DCHECK(IsAligned(byte_size, kTaggedSize));
  CopyTagged(dst, src, static_cast<size_t>(byte_size / kTaggedSize));
}

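// Looks for an AllocationMemento placed directly behind |object|; the mode
// determines how strictly the candidate memento is validated before it is
// returned.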
template <Heap::FindMementoMode mode>
AllocationMemento Heap::FindAllocationMemento(Map map, HeapObject object) {
  Address object_address = object.address();
  Address memento_address = object_address + object.SizeFromMap(map);
  Address last_memento_word_address = memento_address + kTaggedSize;
  // If the memento would be on another page, bail out immediately.
  if (!Page::OnSamePage(object_address, last_memento_word_address)) {
    return AllocationMemento();
  }
  HeapObject candidate = HeapObject::FromAddress(memento_address);
  ObjectSlot candidate_map_slot = candidate.map_slot();
  // This fast check may peek at an uninitialized word. However, the slow check
  // below (memento_address == top) ensures that this is safe. Mark the word as
  // initialized to silence MemorySanitizer warnings.
  MSAN_MEMORY_IS_INITIALIZED(candidate_map_slot.address(), kTaggedSize);
  if (!candidate_map_slot.contains_value(
          ReadOnlyRoots(this).allocation_memento_map().ptr())) {
    return AllocationMemento();
  }

  // Bail out if the memento is below the age mark, which can happen when
  // mementos survived because a page got moved within new space.
  Page* object_page = Page::FromAddress(object_address);
  if (object_page->IsFlagSet(Page::NEW_SPACE_BELOW_AGE_MARK)) {
    Address age_mark =
        reinterpret_cast<SemiSpace*>(object_page->owner())->age_mark();
    if (!object_page->Contains(age_mark)) {
      return AllocationMemento();
    }
    // Do an exact check in the case where the age mark is on the same page.
    if (object_address < age_mark) {
      return AllocationMemento();
    }
  }

  AllocationMemento memento_candidate = AllocationMemento::cast(candidate);

  // Depending on what the memento is used for, we might need to perform
  // additional checks.
  Address top;
  switch (mode) {
    case Heap::kForGC:
      return memento_candidate;
    case Heap::kForRuntime:
      if (memento_candidate.is_null()) return AllocationMemento();
      // Either the object is the last object in the new space, or there is
      // another object of at least word size (the header map word) following
      // it, so it suffices to compare ptr and top here.
      top = NewSpaceTop();
      DCHECK(memento_address == top ||
             memento_address + HeapObject::kHeaderSize <= top ||
             !Page::OnSamePage(memento_address, top - 1));
      if ((memento_address != top) && memento_candidate.IsValid()) {
        return memento_candidate;
      }
      return AllocationMemento();
    default:
      UNREACHABLE();
  }
  UNREACHABLE();
}

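// Records pretenuring feedback for |object| if it is followed by a valid
// AllocationMemento. Feedback is accumulated in the given (non-global) map and
// only merged into the global feedback later.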
void Heap::UpdateAllocationSite(Map map, HeapObject object,
                                PretenuringFeedbackMap* pretenuring_feedback) {
  DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
#ifdef DEBUG
  BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
  DCHECK_IMPLIES(chunk->IsToPage(),
                 chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION));
  DCHECK_IMPLIES(!chunk->InYoungGeneration(),
                 chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION));
#endif
  if (!FLAG_allocation_site_pretenuring ||
      !AllocationSite::CanTrack(map.instance_type())) {
    return;
  }
  AllocationMemento memento_candidate =
      FindAllocationMemento<kForGC>(map, object);
  if (memento_candidate.is_null()) return;

  // Cached feedback is entered here because this runs in the parallel case:
  // we are not allowed to dereference the allocation site and instead have to
  // postpone all checks until the data is actually merged.
  Address key = memento_candidate.GetAllocationSiteUnchecked();
  (*pretenuring_feedback)[AllocationSite::unchecked_cast(Object(key))]++;
}

void Heap::ExternalStringTable::AddString(String string) {
  DCHECK(string.IsExternalString());
  DCHECK(!Contains(string));

  if (InYoungGeneration(string)) {
    young_strings_.push_back(string);
  } else {
    old_strings_.push_back(string);
  }
}

Oddball Heap::ToBoolean(bool condition) {
  ReadOnlyRoots roots(this);
  return condition ? roots.true_value() : roots.false_value();
}

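// Script ids are handed out via a relaxed CAS loop so that several threads can
// request ids concurrently. Id 0 is reserved for kNoScriptId, so the counter
// wraps back to 1 once it reaches Smi::kMaxValue.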
int Heap::NextScriptId() {
  FullObjectSlot last_script_id_slot(&roots_table()[RootIndex::kLastScriptId]);
  Smi last_id = Smi::cast(last_script_id_slot.Relaxed_Load());
  Smi new_id, last_id_before_cas;
  do {
    if (last_id.value() == Smi::kMaxValue) {
      STATIC_ASSERT(v8::UnboundScript::kNoScriptId == 0);
      new_id = Smi::FromInt(1);
    } else {
      new_id = Smi::FromInt(last_id.value() + 1);
    }

    // CAS returns the old value on success, and the current value in the slot
    // on failure. Therefore, we want to break if the returned value matches
    // the old value (last_id), and keep looping (with the new last_id value)
    // if it doesn't.
    last_id_before_cas = last_id;
    last_id =
        Smi::cast(last_script_id_slot.Relaxed_CompareAndSwap(last_id, new_id));
  } while (last_id != last_id_before_cas);

  return new_id.value();
}

int Heap::NextDebuggingId() {
  int last_id = last_debugging_id().value();
  if (last_id == DebugInfo::DebuggingIdBits::kMax) {
    last_id = DebugInfo::kNoDebuggingId;
  }
  last_id++;
  set_last_debugging_id(Smi::FromInt(last_id));
  return last_id;
}

int Heap::GetNextTemplateSerialNumber() {
  int next_serial_number = next_template_serial_number().value() + 1;
  set_next_template_serial_number(Smi::FromInt(next_serial_number));
  return next_serial_number;
}

int Heap::MaxNumberToStringCacheSize() const {
  // Compute the size of the number string cache based on the max newspace
  // size. The number string cache has a minimum size based on twice the
  // initial cache size to ensure that it is bigger after being made
  // 'full size'.
  size_t number_string_cache_size = max_semi_space_size_ / 512;
  number_string_cache_size =
      Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
          Min<size_t>(0x4000u, number_string_cache_size));
  // There is a string and a number per entry so the length is twice the
  // number of entries.
  return static_cast<int>(number_string_cache_size * 2);
}

void Heap::IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                              size_t amount) {
  base::CheckedIncrement(&backing_store_bytes_, amount);
  // TODO(mlippautz): Implement interrupt for global memory allocations that
  // can trigger garbage collections.
}

void Heap::DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                              size_t amount) {
  base::CheckedDecrement(&backing_store_bytes_, amount);
}

bool Heap::HasDirtyJSFinalizationRegistries() {
  return !dirty_js_finalization_registries_list().IsUndefined(isolate());
}

AlwaysAllocateScope::AlwaysAllocateScope(Heap* heap) : heap_(heap) {
  heap_->always_allocate_scope_count_++;
}

AlwaysAllocateScope::~AlwaysAllocateScope() {
  heap_->always_allocate_scope_count_--;
}

AlwaysAllocateScopeForTesting::AlwaysAllocateScopeForTesting(Heap* heap)
    : scope_(heap) {}

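// While a CodeSpaceMemoryModificationScope is active (and code write
// protection is enabled), the entire code space and every code large-object
// page are kept read-and-writable; the destructor restores the default code
// permissions.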
CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
    : heap_(heap) {
  if (heap_->write_protect_code_memory()) {
    heap_->increment_code_space_memory_modification_scope_depth();
    heap_->code_space()->SetReadAndWritable();
    LargePage* page = heap_->code_lo_space()->first_page();
    while (page != nullptr) {
      DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
      CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
      page->SetReadAndWritable();
      page = page->next_page();
    }
  }
}

CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
  if (heap_->write_protect_code_memory()) {
    heap_->decrement_code_space_memory_modification_scope_depth();
    heap_->code_space()->SetDefaultCodePermissions();
    LargePage* page = heap_->code_lo_space()->first_page();
    while (page != nullptr) {
      DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
      CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
      page->SetDefaultCodePermissions();
      page = page->next_page();
    }
  }
}

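// In contrast to the scope above, this scope only tracks code pages that are
// unprotected on demand while it is active and re-protects them on exit, and
// it does so only when no outer CodeSpaceMemoryModificationScope is active.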
CodePageCollectionMemoryModificationScope::
    CodePageCollectionMemoryModificationScope(Heap* heap)
    : heap_(heap) {
  if (heap_->write_protect_code_memory() &&
      !heap_->code_space_memory_modification_scope_depth()) {
    heap_->EnableUnprotectedMemoryChunksRegistry();
  }
}

CodePageCollectionMemoryModificationScope::
    ~CodePageCollectionMemoryModificationScope() {
  if (heap_->write_protect_code_memory() &&
      !heap_->code_space_memory_modification_scope_depth()) {
    heap_->ProtectUnprotectedMemoryChunks();
    heap_->DisableUnprotectedMemoryChunksRegistry();
  }
}

#ifdef V8_ENABLE_THIRD_PARTY_HEAP
CodePageMemoryModificationScope::CodePageMemoryModificationScope(Code code)
    : chunk_(nullptr), scope_active_(false) {}
#else
CodePageMemoryModificationScope::CodePageMemoryModificationScope(Code code)
    : CodePageMemoryModificationScope(BasicMemoryChunk::FromHeapObject(code)) {}
#endif

CodePageMemoryModificationScope::CodePageMemoryModificationScope(
    BasicMemoryChunk* chunk)
    : chunk_(chunk),
      scope_active_(chunk_->heap()->write_protect_code_memory() &&
                    chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
  if (scope_active_) {
    DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
           (chunk_->owner()->identity() == CODE_LO_SPACE));
    MemoryChunk::cast(chunk_)->SetReadAndWritable();
  }
}

CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
  if (scope_active_) {
    MemoryChunk::cast(chunk_)->SetDefaultCodePermissions();
  }
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_INL_H_