// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_INL_H_
#define V8_HEAP_HEAP_INL_H_

#include <cmath>

// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap other than src/heap/heap.h and its
// write barrier here!
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"

#include "src/base/platform/platform.h"
#include "src/counters-inl.h"
#include "src/feedback-vector.h"

// TODO(mstarzinger): There is one more include to remove in order to no longer
// leak heap internals to users of this interface!
#include "src/heap/spaces-inl.h"
#include "src/isolate.h"
#include "src/log.h"
#include "src/msan.h"
#include "src/objects-inl.h"
#include "src/objects/api-callbacks-inl.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/literal-objects.h"
#include "src/objects/scope-info.h"
#include "src/objects/script-inl.h"
#include "src/profiler/heap-profiler.h"
#include "src/string-hasher.h"
#include "src/zone/zone-list-inl.h"

// The following header includes the write barrier essentials that can also be
// used stand-alone without including heap-inl.h.
// TODO(mlippautz): Remove once users of object-macros.h include this file on
// their own.
#include "src/heap/heap-write-barrier-inl.h"

namespace v8 {
namespace internal {

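// A failed allocation is reported as a "retry" result whose payload is the
// space in which the allocation should be retried (after a GC), stored as a
// Smi in object_.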
AllocationSpace AllocationResult::RetrySpace() {
  DCHECK(IsRetry());
  return static_cast<AllocationSpace>(Smi::ToInt(object_));
}

HeapObject* AllocationResult::ToObjectChecked() {
  CHECK(!IsRetry());
  return HeapObject::cast(object_);
}

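// The ROOT_ACCESSOR, DATA_HANDLER_MAP_ACCESSOR and ACCESSOR_INFO_ACCESSOR
// macros below expand to one typed getter per entry of the corresponding root
// list (and, further down, one setter per ROOT_LIST entry), each backed by a
// slot in roots_.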
#define ROOT_ACCESSOR(type, name, camel_name) \
  type* Heap::name() { return type::cast(roots_[k##camel_name##RootIndex]); }
MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

#define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name)  \
  Map* Heap::name##_map() {                                \
    return Map::cast(roots_[k##Name##Size##MapRootIndex]); \
  }
DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
#undef DATA_HANDLER_MAP_ACCESSOR

#define ACCESSOR_INFO_ACCESSOR(accessor_name, AccessorName)                \
  AccessorInfo* Heap::accessor_name##_accessor() {                         \
    return AccessorInfo::cast(roots_[k##AccessorName##AccessorRootIndex]); \
  }
ACCESSOR_INFO_LIST(ACCESSOR_INFO_ACCESSOR)
#undef ACCESSOR_INFO_ACCESSOR

#define ROOT_ACCESSOR(type, name, camel_name)                                 \
  void Heap::set_##name(type* value) {                                        \
    /* The deserializer makes use of the fact that these common roots are */  \
    /* never in new space and never on a page that is being compacted. */     \
    DCHECK(!deserialization_complete() ||                                     \
           RootCanBeWrittenAfterInitialization(k##camel_name##RootIndex));    \
    DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
    roots_[k##camel_name##RootIndex] = value;                                 \
  }
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

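// NEW_SPACE and LO_SPACE are not paged spaces, so they must not be requested
// through paged_space(); use new_space()/lo_space() instead.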
PagedSpace* Heap::paged_space(int idx) {
  DCHECK_NE(idx, LO_SPACE);
  DCHECK_NE(idx, NEW_SPACE);
  return static_cast<PagedSpace*>(space_[idx]);
}

Space* Heap::space(int idx) { return space_[idx]; }

Address* Heap::NewSpaceAllocationTopAddress() {
  return new_space_->allocation_top_address();
}

Address* Heap::NewSpaceAllocationLimitAddress() {
  return new_space_->allocation_limit_address();
}

Address* Heap::OldSpaceAllocationTopAddress() {
  return old_space_->allocation_top_address();
}

Address* Heap::OldSpaceAllocationLimitAddress() {
  return old_space_->allocation_limit_address();
}

void Heap::UpdateNewSpaceAllocationCounter() {
  new_space_allocation_counter_ = NewSpaceAllocationCounter();
}

size_t Heap::NewSpaceAllocationCounter() {
  return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
}

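// AllocateRaw is the low-level bump-pointer allocation entry point. It never
// triggers a GC itself: on failure it returns a retry result that records the
// space in question, and it routes oversized requests to the large-object
// spaces. A minimal caller sketch (assuming a Heap* heap and a size that is
// already object-aligned):
//
//   AllocationResult result = heap->AllocateRaw(size, OLD_SPACE, kWordAligned);
//   HeapObject* obj = nullptr;
//   if (!result.To(&obj)) {
//     // The caller is responsible for collecting garbage and retrying.
//   }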
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
                                   AllocationAlignment alignment) {
  DCHECK(AllowHandleAllocation::IsAllowed());
  DCHECK(AllowHeapAllocation::IsAllowed());
  DCHECK(gc_state_ == NOT_IN_GC);
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
  if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
    if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
      return AllocationResult::Retry(space);
    }
  }
#endif
#ifdef DEBUG
  isolate_->counters()->objs_since_last_full()->Increment();
  isolate_->counters()->objs_since_last_young()->Increment();
#endif

  bool large_object = size_in_bytes > kMaxRegularHeapObjectSize;
  bool new_large_object = FLAG_young_generation_large_objects &&
                          size_in_bytes > kMaxNewSpaceHeapObjectSize;
  HeapObject* object = nullptr;
  AllocationResult allocation;
  if (NEW_SPACE == space) {
    if (large_object) {
      space = LO_SPACE;
    } else {
      if (new_large_object) {
        allocation = new_lo_space_->AllocateRaw(size_in_bytes);
      } else {
        allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
      }
      if (allocation.To(&object)) {
        OnAllocationEvent(object, size_in_bytes);
      }
      return allocation;
    }
  }

  // Here we only allocate in the old generation.
  if (OLD_SPACE == space) {
    if (large_object) {
      allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
    } else {
      allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
    }
  } else if (CODE_SPACE == space) {
    if (size_in_bytes <= code_space()->AreaSize()) {
      allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
    } else {
      allocation = lo_space_->AllocateRaw(size_in_bytes, EXECUTABLE);
    }
  } else if (LO_SPACE == space) {
    DCHECK(large_object);
    allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
  } else if (MAP_SPACE == space) {
    allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
  } else if (RO_SPACE == space) {
#ifdef V8_USE_SNAPSHOT
    DCHECK(isolate_->serializer_enabled());
#endif
    DCHECK(!large_object);
    DCHECK(CanAllocateInReadOnlySpace());
    allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
  } else {
    // NEW_SPACE is not allowed here.
    UNREACHABLE();
  }

  if (allocation.To(&object)) {
    if (space == CODE_SPACE) {
      // Unprotect the memory chunk of the object if it was not unprotected
      // already.
      UnprotectAndRegisterMemoryChunk(object);
      ZapCodeObject(object->address(), size_in_bytes);
    }
    OnAllocationEvent(object, size_in_bytes);
  }

  return allocation;
}

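// Every successful raw allocation is reported here so that attached
// allocation trackers, the --verify-predictable allocation hash, and the
// --trace-allocation-stack-interval sampling all observe the same stream of
// events.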
void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
  for (auto& tracker : allocation_trackers_) {
    tracker->AllocationEvent(object->address(), size_in_bytes);
  }

  if (FLAG_verify_predictable) {
    ++allocations_count_;
    // Advance synthetic time by making a time request.
    MonotonicallyIncreasingTimeInMs();

    UpdateAllocationsHash(object);
    UpdateAllocationsHash(size_in_bytes);

    if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
      PrintAllocationsHash();
    }
  } else if (FLAG_fuzzer_gc_analysis) {
    ++allocations_count_;
  } else if (FLAG_trace_allocation_stack_interval > 0) {
    ++allocations_count_;
    if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
      isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
    }
  }
}


void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
                       int size_in_bytes) {
  HeapProfiler* heap_profiler = isolate_->heap_profiler();
  if (heap_profiler->is_tracking_object_moves()) {
    heap_profiler->ObjectMoveEvent(source->address(), target->address(),
                                   size_in_bytes);
  }
  for (auto& tracker : allocation_trackers_) {
    tracker->MoveEvent(source->address(), target->address(), size_in_bytes);
  }
  if (target->IsSharedFunctionInfo()) {
    LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source->address(),
                                                         target->address()));
  }

  if (FLAG_verify_predictable) {
    ++allocations_count_;
    // Advance synthetic time by making a time request.
    MonotonicallyIncreasingTimeInMs();

    UpdateAllocationsHash(source);
    UpdateAllocationsHash(target);
    UpdateAllocationsHash(size_in_bytes);

    if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
      PrintAllocationsHash();
    }
  } else if (FLAG_fuzzer_gc_analysis) {
    ++allocations_count_;
  }
}

bool Heap::CanAllocateInReadOnlySpace() {
  return !deserialization_complete_ &&
         (isolate()->serializer_enabled() ||
          !isolate()->initialized_from_snapshot());
}

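// For --verify-predictable, each allocated object is folded into a running
// hash by its offset within the page combined with the owning space id, so
// the digest does not depend on where the OS happens to map the pages.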
void Heap::UpdateAllocationsHash(HeapObject* object) {
  Address object_address = object->address();
  MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
  AllocationSpace allocation_space = memory_chunk->owner()->identity();

  STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
  uint32_t value =
      static_cast<uint32_t>(object_address - memory_chunk->address()) |
      (static_cast<uint32_t>(allocation_space) << kPageSizeBits);

  UpdateAllocationsHash(value);
}


void Heap::UpdateAllocationsHash(uint32_t value) {
  uint16_t c1 = static_cast<uint16_t>(value);
  uint16_t c2 = static_cast<uint16_t>(value >> 16);
  raw_allocations_hash_ =
      StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
  raw_allocations_hash_ =
      StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
}


void Heap::RegisterExternalString(String* string) {
  DCHECK(string->IsExternalString());
  DCHECK(!string->IsThinString());
  external_string_table_.AddString(string);
}

void Heap::UpdateExternalString(String* string, size_t old_payload,
                                size_t new_payload) {
  DCHECK(string->IsExternalString());
  Page* page = Page::FromHeapObject(string);

  if (old_payload > new_payload)
    page->DecrementExternalBackingStoreBytes(
        ExternalBackingStoreType::kExternalString, old_payload - new_payload);
  else
    page->IncrementExternalBackingStoreBytes(
        ExternalBackingStoreType::kExternalString, new_payload - old_payload);
}

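// Releases the external resource of a string that is about to die: the page's
// external backing-store accounting is reduced by the payload size, and the
// resource pointer is read through the raw field offset (kResourceOffset) so
// the slot can be cleared in place after Dispose().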
void Heap::FinalizeExternalString(String* string) {
  DCHECK(string->IsExternalString());
  Page* page = Page::FromHeapObject(string);
  ExternalString* ext_string = ExternalString::cast(string);

  page->DecrementExternalBackingStoreBytes(
      ExternalBackingStoreType::kExternalString,
      ext_string->ExternalPayloadSize());

  v8::String::ExternalStringResourceBase** resource_addr =
      reinterpret_cast<v8::String::ExternalStringResourceBase**>(
          reinterpret_cast<byte*>(string) + ExternalString::kResourceOffset -
          kHeapObjectTag);

  // Dispose of the C++ object if it has not already been disposed.
  if (*resource_addr != nullptr) {
    (*resource_addr)->Dispose();
    *resource_addr = nullptr;
  }
}

Address Heap::NewSpaceTop() { return new_space_->top(); }

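// The In*Space predicates below are static so they can be called without a
// Heap*; they inspect per-MemoryChunk flags instead of consulting the space
// lists, which keeps them cheap.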
// static
bool Heap::InNewSpace(Object* object) {
  DCHECK(!HasWeakHeapObjectTag(object));
  return object->IsHeapObject() && InNewSpace(HeapObject::cast(object));
}

// static
bool Heap::InNewSpace(MaybeObject* object) {
  HeapObject* heap_object;
  return object->ToStrongOrWeakHeapObject(&heap_object) &&
         InNewSpace(heap_object);
}

// static
bool Heap::InNewSpace(HeapObject* heap_object) {
  // Inlined check from NewSpace::Contains.
  bool result = MemoryChunk::FromHeapObject(heap_object)->InNewSpace();
#ifdef DEBUG
  // If in NEW_SPACE, then check we're either not in the middle of GC or the
  // object is in to-space.
  if (result) {
    // If the object is in NEW_SPACE, then it's not in RO_SPACE so this is safe.
    Heap* heap = Heap::FromWritableHeapObject(heap_object);
    DCHECK(heap->gc_state_ != NOT_IN_GC || InToSpace(heap_object));
  }
#endif
  return result;
}

// static
bool Heap::InFromSpace(Object* object) {
  DCHECK(!HasWeakHeapObjectTag(object));
  return object->IsHeapObject() && InFromSpace(HeapObject::cast(object));
}

// static
bool Heap::InFromSpace(MaybeObject* object) {
  HeapObject* heap_object;
  return object->ToStrongOrWeakHeapObject(&heap_object) &&
         InFromSpace(heap_object);
}

// static
bool Heap::InFromSpace(HeapObject* heap_object) {
  return MemoryChunk::FromHeapObject(heap_object)
      ->IsFlagSet(Page::IN_FROM_SPACE);
}

// static
bool Heap::InToSpace(Object* object) {
  DCHECK(!HasWeakHeapObjectTag(object));
  return object->IsHeapObject() && InToSpace(HeapObject::cast(object));
}

// static
bool Heap::InToSpace(MaybeObject* object) {
  HeapObject* heap_object;
  return object->ToStrongOrWeakHeapObject(&heap_object) &&
         InToSpace(heap_object);
}

// static
bool Heap::InToSpace(HeapObject* heap_object) {
  return MemoryChunk::FromHeapObject(heap_object)->IsFlagSet(Page::IN_TO_SPACE);
}

bool Heap::InOldSpace(Object* object) { return old_space_->Contains(object); }

bool Heap::InReadOnlySpace(Object* object) {
  return read_only_space_->Contains(object);
}

bool Heap::InNewSpaceSlow(Address address) {
  return new_space_->ContainsSlow(address);
}

bool Heap::InOldSpaceSlow(Address address) {
  return old_space_->ContainsSlow(address);
}

// static
Heap* Heap::FromWritableHeapObject(const HeapObject* obj) {
  MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
  // RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
  // find a heap. The exception is when the ReadOnlySpace is writeable, during
  // bootstrapping, so explicitly allow this case.
  SLOW_DCHECK(chunk->owner()->identity() != RO_SPACE ||
              static_cast<ReadOnlySpace*>(chunk->owner())->writable());
  Heap* heap = chunk->heap();
  SLOW_DCHECK(heap != nullptr);
  return heap;
}

bool Heap::ShouldBePromoted(Address old_address) {
  Page* page = Page::FromAddress(old_address);
  Address age_mark = new_space_->age_mark();
  return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
         (!page->ContainsLimit(age_mark) || old_address < age_mark);
}

void Heap::CopyBlock(Address dst, Address src, int byte_size) {
  CopyWords(reinterpret_cast<Object**>(dst), reinterpret_cast<Object**>(src),
            static_cast<size_t>(byte_size / kPointerSize));
}

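// Looks for an AllocationMemento directly following |object|. The memento, if
// present, was placed immediately after the object by allocation-site
// tracking, so the lookup is essentially a map check on the word following
// the object's end. |mode| controls how defensively the candidate is
// validated: kForGC trusts the map check, while kForRuntime additionally
// guards against the candidate overlapping the current new-space top.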
template <Heap::FindMementoMode mode>
AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
  Address object_address = object->address();
  Address memento_address = object_address + object->SizeFromMap(map);
  Address last_memento_word_address = memento_address + kPointerSize;
  // If the memento would be on another page, bail out immediately.
  if (!Page::OnSamePage(object_address, last_memento_word_address)) {
    return nullptr;
  }
  HeapObject* candidate = HeapObject::FromAddress(memento_address);
  Map* candidate_map = candidate->map();
  // This fast check may peek at an uninitialized word. However, the slow check
  // below (memento_address == top) ensures that this is safe. Mark the word as
  // initialized to silence MemorySanitizer warnings.
  MSAN_MEMORY_IS_INITIALIZED(&candidate_map, sizeof(candidate_map));
  if (candidate_map != ReadOnlyRoots(this).allocation_memento_map()) {
    return nullptr;
  }

  // Bail out if the memento is below the age mark, which can happen when
  // mementos survived because a page got moved within new space.
  Page* object_page = Page::FromAddress(object_address);
  if (object_page->IsFlagSet(Page::NEW_SPACE_BELOW_AGE_MARK)) {
    Address age_mark =
        reinterpret_cast<SemiSpace*>(object_page->owner())->age_mark();
    if (!object_page->Contains(age_mark)) {
      return nullptr;
    }
    // Do an exact check in the case where the age mark is on the same page.
    if (object_address < age_mark) {
      return nullptr;
    }
  }

  AllocationMemento* memento_candidate = AllocationMemento::cast(candidate);

  // Depending on what the memento is used for, we might need to perform
  // additional checks.
  Address top;
  switch (mode) {
    case Heap::kForGC:
      return memento_candidate;
    case Heap::kForRuntime:
      if (memento_candidate == nullptr) return nullptr;
      // Either the object is the last object in the new space, or there is
      // another object of at least word size (the header map word) following
      // it, so it suffices to compare memento_address and top here.
      top = NewSpaceTop();
      DCHECK(memento_address == top ||
             memento_address + HeapObject::kHeaderSize <= top ||
             !Page::OnSamePage(memento_address, top - 1));
      if ((memento_address != top) && memento_candidate->IsValid()) {
        return memento_candidate;
      }
      return nullptr;
    default:
      UNREACHABLE();
  }
  UNREACHABLE();
}

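// Records one unit of pretenuring feedback: if |object| is followed by a
// valid memento, the count for the memento's AllocationSite is bumped in the
// caller-provided (local, per-task) feedback map; the sites themselves are
// only dereferenced later when the feedback is merged.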
void Heap::UpdateAllocationSite(Map* map, HeapObject* object,
                                PretenuringFeedbackMap* pretenuring_feedback) {
  DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
  DCHECK(
      InFromSpace(object) ||
      (InToSpace(object) && Page::FromAddress(object->address())
                                ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) ||
      (!InNewSpace(object) && Page::FromAddress(object->address())
                                  ->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)));
  if (!FLAG_allocation_site_pretenuring ||
      !AllocationSite::CanTrack(map->instance_type()))
    return;
  AllocationMemento* memento_candidate =
      FindAllocationMemento<kForGC>(map, object);
  if (memento_candidate == nullptr) return;

  // The feedback is entered into a cached map because this runs in the
  // parallel case: we are not allowed to dereference the allocation site here
  // and have to postpone all checks until the data is actually merged.
  Address key = memento_candidate->GetAllocationSiteUnchecked();
  (*pretenuring_feedback)[reinterpret_cast<AllocationSite*>(key)]++;
}

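// Recovers the owning Isolate from a Heap* without storing a back pointer:
// the expression computes the offset of the heap_ field inside Isolate (via a
// dummy Isolate* at address 16) and subtracts it from |this|.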
Isolate* Heap::isolate() {
  return reinterpret_cast<Isolate*>(
      reinterpret_cast<intptr_t>(this) -
      reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
}

void Heap::ExternalStringTable::AddString(String* string) {
  DCHECK(string->IsExternalString());
  DCHECK(!Contains(string));

  if (InNewSpace(string)) {
    new_space_strings_.push_back(string);
  } else {
    old_space_strings_.push_back(string);
  }
}

Oddball* Heap::ToBoolean(bool condition) {
  ReadOnlyRoots roots(this);
  return condition ? roots.true_value() : roots.false_value();
}

uint64_t Heap::HashSeed() {
  uint64_t seed;
  hash_seed()->copy_out(0, reinterpret_cast<byte*>(&seed), kInt64Size);
  DCHECK(FLAG_randomize_hashes || seed == 0);
  return seed;
}

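// Script ids are handed out from a Smi counter in the root list and wrap
// around (never returning kNoScriptId itself) once Smi::kMaxValue is reached.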
int Heap::NextScriptId() {
  int last_id = last_script_id()->value();
  if (last_id == Smi::kMaxValue) last_id = v8::UnboundScript::kNoScriptId;
  last_id++;
  set_last_script_id(Smi::FromInt(last_id));
  return last_id;
}

int Heap::NextDebuggingId() {
  int last_id = last_debugging_id()->value();
  if (last_id == DebugInfo::DebuggingIdBits::kMax) {
    last_id = DebugInfo::kNoDebuggingId;
  }
  last_id++;
  set_last_debugging_id(Smi::FromInt(last_id));
  return last_id;
}

int Heap::GetNextTemplateSerialNumber() {
  int next_serial_number = next_template_serial_number()->value() + 1;
  set_next_template_serial_number(Smi::FromInt(next_serial_number));
  return next_serial_number;
}

int Heap::MaxNumberToStringCacheSize() const {
  // Compute the size of the number string cache based on the max newspace
  // size. The number string cache has a minimum size based on twice the
  // initial cache size to ensure that it is bigger after being made
  // 'full size'.
  size_t number_string_cache_size = max_semi_space_size_ / 512;
  number_string_cache_size =
      Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
          Min<size_t>(0x4000u, number_string_cache_size));
  // There is a string and a number per entry so the length is twice the number
  // of entries.
  return static_cast<int>(number_string_cache_size * 2);
}
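
// RAII scope that increments the heap's always-allocate counter for its
// lifetime; while it is active, allocations are allowed to proceed in
// situations where they would otherwise be refused (e.g. the artificial
// allocation timeout in AllocateRaw above).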
AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
    : heap_(isolate->heap()) {
  heap_->always_allocate_scope_count_++;
}

AlwaysAllocateScope::~AlwaysAllocateScope() {
  heap_->always_allocate_scope_count_--;
}

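// RAII scope for bulk modifications of code: when code-space write protection
// is enabled, it flips the entire code space and every executable large page
// to read+write on entry and back to read+execute on exit.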
CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
    : heap_(heap) {
  if (heap_->write_protect_code_memory()) {
    heap_->increment_code_space_memory_modification_scope_depth();
    heap_->code_space()->SetReadAndWritable();
    LargePage* page = heap_->lo_space()->first_page();
    while (page != nullptr) {
      if (page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
        CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
        page->SetReadAndWritable();
      }
      page = page->next_page();
    }
  }
}

CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
  if (heap_->write_protect_code_memory()) {
    heap_->decrement_code_space_memory_modification_scope_depth();
    heap_->code_space()->SetReadAndExecutable();
    LargePage* page = heap_->lo_space()->first_page();
    while (page != nullptr) {
      if (page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
        CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
        page->SetReadAndExecutable();
      }
      page = page->next_page();
    }
  }
}

CodePageCollectionMemoryModificationScope::
    CodePageCollectionMemoryModificationScope(Heap* heap)
    : heap_(heap) {
  if (heap_->write_protect_code_memory() &&
      !heap_->code_space_memory_modification_scope_depth()) {
    heap_->EnableUnprotectedMemoryChunksRegistry();
  }
}

CodePageCollectionMemoryModificationScope::
    ~CodePageCollectionMemoryModificationScope() {
  if (heap_->write_protect_code_memory() &&
      !heap_->code_space_memory_modification_scope_depth()) {
    heap_->ProtectUnprotectedMemoryChunks();
    heap_->DisableUnprotectedMemoryChunksRegistry();
  }
}

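// Per-chunk variant of the scopes above: temporarily makes a single
// executable chunk (a code-space page or an executable large page) writable,
// and only when code-space write protection is enabled for this heap.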
CodePageMemoryModificationScope::CodePageMemoryModificationScope(
    MemoryChunk* chunk)
    : chunk_(chunk),
      scope_active_(chunk_->heap()->write_protect_code_memory() &&
                    chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
  if (scope_active_) {
    DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
           (chunk_->owner()->identity() == LO_SPACE &&
            chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)));
    chunk_->SetReadAndWritable();
  }
}

CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
  if (scope_active_) {
    chunk_->SetReadAndExecutable();
  }
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_INL_H_