// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_INL_H_
#define V8_HEAP_HEAP_INL_H_

#include <atomic>
#include <cmath>

// Clients of this interface shouldn't depend on lots of heap internals.
// Avoid including anything but `heap.h` from `src/heap` where possible.
#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/platform.h"
#include "src/base/sanitizer/msan.h"
#include "src/common/assert-scope.h"
#include "src/execution/isolate-data.h"
#include "src/execution/isolate.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/concurrent-allocator-inl.h"
#include "src/heap/concurrent-allocator.h"
#include "src/heap/heap-allocator-inl.h"
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
#include "src/heap/large-spaces.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/new-spaces-inl.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/safepoint.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/third-party/heap-api.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/cell-inl.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/feedback-cell-inl.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/property-cell.h"
#include "src/objects/scope-info.h"
#include "src/objects/slots-inl.h"
#include "src/objects/struct-inl.h"
#include "src/objects/visitors-inl.h"
#include "src/profiler/heap-profiler.h"
#include "src/strings/string-hasher.h"
#include "src/utils/ostreams.h"
#include "src/zone/zone-list-inl.h"

namespace v8 {
namespace internal {

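// Returns the forwarding address of |heap_obj| if it has already been moved
// by the garbage collector, a null object if it is an unforwarded object on a
// from-page (i.e. it did not survive the collection), and the object itself
// otherwise.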
template <typename T>
T ForwardingAddress(T heap_obj) {
  MapWord map_word = heap_obj.map_word(kRelaxedLoad);

  if (map_word.IsForwardingAddress()) {
    return T::cast(map_word.ToForwardingAddress());
  } else if (Heap::InFromPage(heap_obj)) {
    return T();
  } else {
    return heap_obj;
  }
}

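// Derives the set of code flushing modes from the current flags; returns an
// empty set when bytecode flushing is disabled for this isolate.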
// static
base::EnumSet<CodeFlushMode> Heap::GetCodeFlushMode(Isolate* isolate) {
  if (isolate->disable_bytecode_flushing()) {
    return base::EnumSet<CodeFlushMode>();
  }

  base::EnumSet<CodeFlushMode> code_flush_mode;
  if (FLAG_flush_bytecode) {
    code_flush_mode.Add(CodeFlushMode::kFlushBytecode);
  }

  if (FLAG_flush_baseline_code) {
    code_flush_mode.Add(CodeFlushMode::kFlushBaselineCode);
  }

  if (FLAG_stress_flush_code) {
    // This check ensures that tests don't accidentally enable stress code
    // flushing without also enabling bytecode or baseline code flushing;
    // stress_flush_code does nothing if neither of them is enabled.
    DCHECK(FLAG_fuzzing || FLAG_flush_baseline_code || FLAG_flush_bytecode);
    code_flush_mode.Add(CodeFlushMode::kStressFlushCode);
  }

  return code_flush_mode;
}

Isolate* Heap::isolate() { return Isolate::FromHeap(this); }

int64_t Heap::external_memory() { return external_memory_.total(); }

int64_t Heap::update_external_memory(int64_t delta) {
  return external_memory_.Update(delta);
}

PagedSpace* Heap::space_for_maps() {
  return V8_LIKELY(map_space_) ? static_cast<PagedSpace*>(map_space_)
                               : static_cast<PagedSpace*>(old_space_);
}

ConcurrentAllocator* Heap::concurrent_allocator_for_maps() {
  return V8_LIKELY(shared_map_allocator_) ? shared_map_allocator_.get()
                                          : shared_old_allocator_.get();
}

RootsTable& Heap::roots_table() { return isolate()->roots_table(); }

#define ROOT_ACCESSOR(Type, name, CamelName)                           \
  Type Heap::name() {                                                  \
    return Type::cast(Object(roots_table()[RootIndex::k##CamelName])); \
  }
MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

#define ROOT_ACCESSOR(type, name, CamelName)                                   \
  void Heap::set_##name(type value) {                                          \
    /* The deserializer makes use of the fact that these common roots are */   \
    /* never in new space and never on a page that is being compacted.    */   \
    DCHECK_IMPLIES(deserialization_complete(),                                 \
                   !RootsTable::IsImmortalImmovable(RootIndex::k##CamelName)); \
    DCHECK_IMPLIES(RootsTable::IsImmortalImmovable(RootIndex::k##CamelName),   \
                   IsImmovable(HeapObject::cast(value)));                      \
    roots_table()[RootIndex::k##CamelName] = value.ptr();                      \
  }
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

void Heap::SetRootMaterializedObjects(FixedArray objects) {
  roots_table()[RootIndex::kMaterializedObjects] = objects.ptr();
}

void Heap::SetRootScriptList(Object value) {
  roots_table()[RootIndex::kScriptList] = value.ptr();
}

void Heap::SetMessageListeners(TemplateList value) {
  roots_table()[RootIndex::kMessageListeners] = value.ptr();
}

void Heap::SetPendingOptimizeForTestBytecode(Object hash_table) {
  DCHECK(hash_table.IsObjectHashTable() || hash_table.IsUndefined(isolate()));
  roots_table()[RootIndex::kPendingOptimizeForTestBytecode] = hash_table.ptr();
}

PagedSpace* Heap::paged_space(int idx) {
  DCHECK(idx == OLD_SPACE || idx == CODE_SPACE || idx == MAP_SPACE);
  return static_cast<PagedSpace*>(space_[idx]);
}

Space* Heap::space(int idx) { return space_[idx]; }

Address* Heap::NewSpaceAllocationTopAddress() {
  return new_space_ ? new_space_->allocation_top_address() : nullptr;
}

Address* Heap::NewSpaceAllocationLimitAddress() {
  return new_space_ ? new_space_->allocation_limit_address() : nullptr;
}

Address* Heap::OldSpaceAllocationTopAddress() {
  return old_space_->allocation_top_address();
}

Address* Heap::OldSpaceAllocationLimitAddress() {
  return old_space_->allocation_limit_address();
}

inline const base::AddressRegion& Heap::code_region() {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
  return tp_heap_->GetCodeRange();
#else
  static constexpr base::AddressRegion kEmptyRegion;
  return code_range_ ? code_range_->reservation()->region() : kEmptyRegion;
#endif
}

Address Heap::code_range_base() {
  return code_range_ ? code_range_->base() : kNullAddress;
}

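// Code objects are restricted to the maximum regular code object size of a
// memory chunk; all other allocation types use kMaxRegularHeapObjectSize.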
int Heap::MaxRegularHeapObjectSize(AllocationType allocation) {
  if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
      (allocation == AllocationType::kCode)) {
    DCHECK_EQ(MemoryChunkLayout::MaxRegularCodeObjectSize(),
              max_regular_code_object_size_);
    return max_regular_code_object_size_;
  }
  return kMaxRegularHeapObjectSize;
}

AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
                                   AllocationOrigin origin,
                                   AllocationAlignment alignment) {
  return heap_allocator_.AllocateRaw(size_in_bytes, type, origin, alignment);
}

Address Heap::AllocateRawOrFail(int size, AllocationType allocation,
                                AllocationOrigin origin,
                                AllocationAlignment alignment) {
  return heap_allocator_
      .AllocateRawWith<HeapAllocator::kRetryOrFail>(size, allocation, origin,
                                                    alignment)
      .address();
}

void Heap::RegisterExternalString(String string) {
  DCHECK(string.IsExternalString());
  DCHECK(!string.IsThinString());
  external_string_table_.AddString(string);
}

void Heap::FinalizeExternalString(String string) {
  DCHECK(string.IsExternalString());
  ExternalString ext_string = ExternalString::cast(string);

  if (!FLAG_enable_third_party_heap) {
    Page* page = Page::FromHeapObject(string);
    page->DecrementExternalBackingStoreBytes(
        ExternalBackingStoreType::kExternalString,
        ext_string.ExternalPayloadSize());
  }

  ext_string.DisposeResource(isolate());
}

Address Heap::NewSpaceTop() {
  return new_space_ ? new_space_->top() : kNullAddress;
}

bool Heap::InYoungGeneration(Object object) {
  DCHECK(!HasWeakHeapObjectTag(object));
  return object.IsHeapObject() && InYoungGeneration(HeapObject::cast(object));
}

// static
bool Heap::InYoungGeneration(MaybeObject object) {
  HeapObject heap_object;
  return object->GetHeapObject(&heap_object) && InYoungGeneration(heap_object);
}

// static
bool Heap::InYoungGeneration(HeapObject heap_object) {
  if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return false;
  bool result =
      BasicMemoryChunk::FromHeapObject(heap_object)->InYoungGeneration();
#ifdef DEBUG
  // If in the young generation, then check that we're either in the middle of
  // a GC or the object is in to-space.
  if (result) {
    // If the object is in the young generation, then it's not in RO_SPACE so
    // this is safe.
    Heap* heap = Heap::FromWritableHeapObject(heap_object);
    DCHECK_IMPLIES(heap->gc_state() == NOT_IN_GC, InToPage(heap_object));
  }
#endif
  return result;
}

// static
bool Heap::InFromPage(Object object) {
  DCHECK(!HasWeakHeapObjectTag(object));
  return object.IsHeapObject() && InFromPage(HeapObject::cast(object));
}

// static
bool Heap::InFromPage(MaybeObject object) {
  HeapObject heap_object;
  return object->GetHeapObject(&heap_object) && InFromPage(heap_object);
}

// static
bool Heap::InFromPage(HeapObject heap_object) {
  return BasicMemoryChunk::FromHeapObject(heap_object)->IsFromPage();
}

// static
bool Heap::InToPage(Object object) {
  DCHECK(!HasWeakHeapObjectTag(object));
  return object.IsHeapObject() && InToPage(HeapObject::cast(object));
}

// static
bool Heap::InToPage(MaybeObject object) {
  HeapObject heap_object;
  return object->GetHeapObject(&heap_object) && InToPage(heap_object);
}

// static
bool Heap::InToPage(HeapObject heap_object) {
  return BasicMemoryChunk::FromHeapObject(heap_object)->IsToPage();
}

bool Heap::InOldSpace(Object object) {
  if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
    return object.IsHeapObject() &&
           third_party_heap::Heap::InOldSpace(object.ptr());
  }
  return old_space_->Contains(object);
}

// static
Heap* Heap::FromWritableHeapObject(HeapObject obj) {
  if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
    return Heap::GetIsolateFromWritableObject(obj)->heap();
  }
  BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(obj);
  // RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
  // find a heap. The exception is when the ReadOnlySpace is writable, during
  // bootstrapping, so explicitly allow this case.
  SLOW_DCHECK(chunk->IsWritable());
  Heap* heap = chunk->heap();
  SLOW_DCHECK(heap != nullptr);
  return heap;
}

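// Returns true if the object at |old_address| lies below the new-space age
// mark, i.e. it has already survived a scavenge and should be promoted to old
// space.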
bool Heap::ShouldBePromoted(Address old_address) {
  Page* page = Page::FromAddress(old_address);
  Address age_mark = new_space_->age_mark();
  return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
         (!page->ContainsLimit(age_mark) || old_address < age_mark);
}

void Heap::CopyBlock(Address dst, Address src, int byte_size) {
  DCHECK(IsAligned(byte_size, kTaggedSize));
  CopyTagged(dst, src, static_cast<size_t>(byte_size / kTaggedSize));
}

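// Searches for an AllocationMemento placed directly behind |object|. The
// |mode| determines how strict the check is: kForGC accepts the raw
// candidate, while kForRuntime additionally rejects candidates that coincide
// with the current new-space top or are otherwise invalid.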
template <Heap::FindMementoMode mode>
AllocationMemento Heap::FindAllocationMemento(Map map, HeapObject object) {
  Address object_address = object.address();
  Address memento_address = object_address + object.SizeFromMap(map);
  Address last_memento_word_address = memento_address + kTaggedSize;
  // If the memento would be on another page, bail out immediately.
  if (!Page::OnSamePage(object_address, last_memento_word_address)) {
    return AllocationMemento();
  }
  HeapObject candidate = HeapObject::FromAddress(memento_address);
  ObjectSlot candidate_map_slot = candidate.map_slot();
  // This fast check may peek at an uninitialized word. However, the slow check
  // below (memento_address == top) ensures that this is safe. Mark the word as
  // initialized to silence MemorySanitizer warnings.
  MSAN_MEMORY_IS_INITIALIZED(candidate_map_slot.address(), kTaggedSize);
  if (!candidate_map_slot.contains_map_value(
          ReadOnlyRoots(this).allocation_memento_map().ptr())) {
    return AllocationMemento();
  }

  // Bail out if the memento is below the age mark, which can happen when
  // mementos survived because a page got moved within new space.
  Page* object_page = Page::FromAddress(object_address);
  if (object_page->IsFlagSet(Page::NEW_SPACE_BELOW_AGE_MARK)) {
    Address age_mark =
        reinterpret_cast<SemiSpace*>(object_page->owner())->age_mark();
    if (!object_page->Contains(age_mark)) {
      return AllocationMemento();
    }
    // Do an exact check in the case where the age mark is on the same page.
    if (object_address < age_mark) {
      return AllocationMemento();
    }
  }

  AllocationMemento memento_candidate = AllocationMemento::cast(candidate);

  // Depending on what the memento is used for, we might need to perform
  // additional checks.
  Address top;
  switch (mode) {
    case Heap::kForGC:
      return memento_candidate;
    case Heap::kForRuntime:
      if (memento_candidate.is_null()) return AllocationMemento();
      // Either the object is the last object in the new space, or there is
      // another object of at least word size (the header map word) following
      // it, so it suffices to compare ptr and top here.
      top = NewSpaceTop();
      DCHECK(memento_address == top ||
             memento_address + HeapObject::kHeaderSize <= top ||
             !Page::OnSamePage(memento_address, top - 1));
      if ((memento_address != top) && memento_candidate.IsValid()) {
        return memento_candidate;
      }
      return AllocationMemento();
    default:
      UNREACHABLE();
  }
  UNREACHABLE();
}

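// Records pretenuring feedback for |object| if its map can be tracked by an
// AllocationSite. The feedback is accumulated in |pretenuring_feedback| and
// merged into the global feedback later, so the allocation site itself is not
// dereferenced here.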
void Heap::UpdateAllocationSite(Map map, HeapObject object,
                                PretenuringFeedbackMap* pretenuring_feedback) {
  DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
#ifdef DEBUG
  BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
  DCHECK_IMPLIES(chunk->IsToPage(),
                 chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION));
  DCHECK_IMPLIES(!chunk->InYoungGeneration(),
                 chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION));
#endif
  if (!FLAG_allocation_site_pretenuring ||
      !AllocationSite::CanTrack(map.instance_type())) {
    return;
  }
  AllocationMemento memento_candidate =
      FindAllocationMemento<kForGC>(map, object);
  if (memento_candidate.is_null()) return;

  // Entering cached feedback is used in the parallel case. We are not allowed
  // to dereference the allocation site, and instead have to postpone all
  // checks until the data is actually merged.
  Address key = memento_candidate.GetAllocationSiteUnchecked();
  (*pretenuring_feedback)[AllocationSite::unchecked_cast(Object(key))]++;
}

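// Returns true if |object| lies in the currently open allocation area of its
// space, i.e. between the original top and limit (or is the pending object of
// a large object space), and may therefore still be under construction.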
bool Heap::IsPendingAllocationInternal(HeapObject object) {
  DCHECK(deserialization_complete());

  if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
    return tp_heap_->IsPendingAllocation(object);
  }

  BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
  if (chunk->InReadOnlySpace()) return false;

  BaseSpace* base_space = chunk->owner();
  Address addr = object.address();

  switch (base_space->identity()) {
    case NEW_SPACE: {
      base::SharedMutexGuard<base::kShared> guard(
          new_space_->pending_allocation_mutex());
      Address top = new_space_->original_top_acquire();
      Address limit = new_space_->original_limit_relaxed();
      DCHECK_LE(top, limit);
      return top && top <= addr && addr < limit;
    }

    case OLD_SPACE:
    case CODE_SPACE:
    case MAP_SPACE: {
      PagedSpace* paged_space = static_cast<PagedSpace*>(base_space);
      base::SharedMutexGuard<base::kShared> guard(
          paged_space->pending_allocation_mutex());
      Address top = paged_space->original_top();
      Address limit = paged_space->original_limit();
      DCHECK_LE(top, limit);
      return top && top <= addr && addr < limit;
    }

    case LO_SPACE:
    case CODE_LO_SPACE:
    case NEW_LO_SPACE: {
      LargeObjectSpace* large_space =
          static_cast<LargeObjectSpace*>(base_space);
      base::SharedMutexGuard<base::kShared> guard(
          large_space->pending_allocation_mutex());
      return addr == large_space->pending_object();
    }

    case RO_SPACE:
      UNREACHABLE();
  }

  UNREACHABLE();
}

bool Heap::IsPendingAllocation(HeapObject object) {
  bool result = IsPendingAllocationInternal(object);
  if (FLAG_trace_pending_allocations && result) {
    StdoutStream{} << "Pending allocation: " << std::hex << "0x" << object.ptr()
                   << "\n";
  }
  return result;
}

bool Heap::IsPendingAllocation(Object object) {
  return object.IsHeapObject() && IsPendingAllocation(HeapObject::cast(object));
}

void Heap::ExternalStringTable::AddString(String string) {
  DCHECK(string.IsExternalString());
  DCHECK(!Contains(string));

  if (InYoungGeneration(string)) {
    young_strings_.push_back(string);
  } else {
    old_strings_.push_back(string);
  }
}

Oddball Heap::ToBoolean(bool condition) {
  ReadOnlyRoots roots(this);
  return condition ? roots.true_value() : roots.false_value();
}

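// Atomically increments the last script id root, wrapping around to 1 when
// Smi::kMaxValue is reached so that kNoScriptId (0) is never handed out.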
int Heap::NextScriptId() {
  FullObjectSlot last_script_id_slot(&roots_table()[RootIndex::kLastScriptId]);
  Smi last_id = Smi::cast(last_script_id_slot.Relaxed_Load());
  Smi new_id, last_id_before_cas;
  do {
    if (last_id.value() == Smi::kMaxValue) {
      STATIC_ASSERT(v8::UnboundScript::kNoScriptId == 0);
      new_id = Smi::FromInt(1);
    } else {
      new_id = Smi::FromInt(last_id.value() + 1);
    }

    // CAS returns the old value on success, and the current value in the slot
    // on failure. Therefore, we want to break if the returned value matches the
    // old value (last_id), and keep looping (with the new last_id value) if it
    // doesn't.
    last_id_before_cas = last_id;
    last_id =
        Smi::cast(last_script_id_slot.Relaxed_CompareAndSwap(last_id, new_id));
  } while (last_id != last_id_before_cas);

  return new_id.value();
}

int Heap::NextDebuggingId() {
  int last_id = last_debugging_id().value();
  if (last_id == DebugInfo::DebuggingIdBits::kMax) {
    last_id = DebugInfo::kNoDebuggingId;
  }
  last_id++;
  set_last_debugging_id(Smi::FromInt(last_id));
  return last_id;
}

int Heap::GetNextTemplateSerialNumber() {
  int next_serial_number = next_template_serial_number().value();
  set_next_template_serial_number(Smi::FromInt(next_serial_number + 1));
  return next_serial_number;
}

int Heap::MaxNumberToStringCacheSize() const {
  // Compute the size of the number string cache based on the max newspace size.
  // The number string cache has a minimum size based on twice the initial cache
  // size to ensure that it is bigger after being made 'full size'.
  size_t number_string_cache_size = max_semi_space_size_ / 512;
  number_string_cache_size =
      std::max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
               std::min(static_cast<size_t>(0x4000), number_string_cache_size));
  // There is a string and a number per entry so the length is twice the number
  // of entries.
  return static_cast<int>(number_string_cache_size * 2);
}

void Heap::IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                              size_t amount) {
  base::CheckedIncrement(&backing_store_bytes_, static_cast<uint64_t>(amount),
                         std::memory_order_relaxed);
  // TODO(mlippautz): Implement interrupt for global memory allocations that can
  // trigger garbage collections.
}

void Heap::DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                              size_t amount) {
  base::CheckedDecrement(&backing_store_bytes_, static_cast<uint64_t>(amount),
                         std::memory_order_relaxed);
}

bool Heap::HasDirtyJSFinalizationRegistries() {
  return !dirty_js_finalization_registries_list().IsUndefined(isolate());
}

VerifyPointersVisitor::VerifyPointersVisitor(Heap* heap)
    : ObjectVisitorWithCageBases(heap), heap_(heap) {}

AlwaysAllocateScope::AlwaysAllocateScope(Heap* heap) : heap_(heap) {
  heap_->always_allocate_scope_count_++;
}

AlwaysAllocateScope::~AlwaysAllocateScope() {
  heap_->always_allocate_scope_count_--;
}

OptionalAlwaysAllocateScope::OptionalAlwaysAllocateScope(Heap* heap)
    : heap_(heap) {
  if (heap_) heap_->always_allocate_scope_count_++;
}

OptionalAlwaysAllocateScope::~OptionalAlwaysAllocateScope() {
  if (heap_) heap_->always_allocate_scope_count_--;
}

AlwaysAllocateScopeForTesting::AlwaysAllocateScopeForTesting(Heap* heap)
    : scope_(heap) {}

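// While this scope is active, write protection is lifted from all code space
// pages (regular and large) so that the main thread can modify code objects;
// the destructor restores the default permissions.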
CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
    : heap_(heap) {
  DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
  heap_->safepoint()->AssertActive();
  if (heap_->write_protect_code_memory()) {
    heap_->increment_code_space_memory_modification_scope_depth();
    heap_->code_space()->SetCodeModificationPermissions();
    LargePage* page = heap_->code_lo_space()->first_page();
    while (page != nullptr) {
      DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
      DCHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
      page->SetCodeModificationPermissions();
      page = page->next_page();
    }
  }
}

CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
  if (heap_->write_protect_code_memory()) {
    heap_->decrement_code_space_memory_modification_scope_depth();
    heap_->code_space()->SetDefaultCodePermissions();
    LargePage* page = heap_->code_lo_space()->first_page();
    while (page != nullptr) {
      DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
      DCHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
      page->SetDefaultCodePermissions();
      page = page->next_page();
    }
  }
}

CodePageCollectionMemoryModificationScope::
    CodePageCollectionMemoryModificationScope(Heap* heap)
    : heap_(heap) {
  if (heap_->write_protect_code_memory()) {
    heap_->IncrementCodePageCollectionMemoryModificationScopeDepth();
  }
}

CodePageCollectionMemoryModificationScope::
    ~CodePageCollectionMemoryModificationScope() {
  if (heap_->write_protect_code_memory()) {
    heap_->DecrementCodePageCollectionMemoryModificationScopeDepth();
    if (heap_->code_page_collection_memory_modification_scope_depth() == 0) {
      heap_->ProtectUnprotectedMemoryChunks();
    }
  }
}

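// Lifts write protection from a single code chunk for the duration of the
// scope, provided code memory is write-protected at all; the destructor
// restores the default permissions. When V8_ENABLE_THIRD_PARTY_HEAP is
// defined the scope is a no-op.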
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
CodePageMemoryModificationScope::CodePageMemoryModificationScope(Code code)
    : chunk_(nullptr), scope_active_(false) {}
#else
CodePageMemoryModificationScope::CodePageMemoryModificationScope(Code code)
    : CodePageMemoryModificationScope(BasicMemoryChunk::FromHeapObject(code)) {}
#endif

CodePageMemoryModificationScope::CodePageMemoryModificationScope(
    BasicMemoryChunk* chunk)
    : chunk_(chunk),
      scope_active_(chunk_->heap()->write_protect_code_memory() &&
                    chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
  if (scope_active_) {
    DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
           (chunk_->owner()->identity() == CODE_LO_SPACE));
    MemoryChunk::cast(chunk_)->SetCodeModificationPermissions();
  }
}

CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
  if (scope_active_) {
    MemoryChunk::cast(chunk_)->SetDefaultCodePermissions();
  }
}

IgnoreLocalGCRequests::IgnoreLocalGCRequests(Heap* heap) : heap_(heap) {
  DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
  heap_->ignore_local_gc_requests_depth_++;
}

IgnoreLocalGCRequests::~IgnoreLocalGCRequests() {
  DCHECK_GT(heap_->ignore_local_gc_requests_depth_, 0);
  heap_->ignore_local_gc_requests_depth_--;
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_INL_H_