// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_ALLOCATOR_INL_H_
#define V8_HEAP_HEAP_ALLOCATOR_INL_H_

#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/heap/concurrent-allocator-inl.h"
#include "src/heap/heap-allocator.h"
#include "src/heap/large-spaces.h"
#include "src/heap/new-spaces.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/third-party/heap-api.h"

namespace v8 {
namespace internal {

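// Inline accessors for the individual heap spaces the allocator hands
// allocations to.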
PagedSpace* HeapAllocator::code_space() const {
  return static_cast<PagedSpace*>(spaces_[CODE_SPACE]);
}

CodeLargeObjectSpace* HeapAllocator::code_lo_space() const {
  return static_cast<CodeLargeObjectSpace*>(spaces_[CODE_LO_SPACE]);
}

OldLargeObjectSpace* HeapAllocator::lo_space() const {
  return static_cast<OldLargeObjectSpace*>(spaces_[LO_SPACE]);
}

PagedSpace* HeapAllocator::space_for_maps() const { return space_for_maps_; }

NewSpace* HeapAllocator::new_space() const {
  return static_cast<NewSpace*>(spaces_[NEW_SPACE]);
}

NewLargeObjectSpace* HeapAllocator::new_lo_space() const {
  return static_cast<NewLargeObjectSpace*>(spaces_[NEW_LO_SPACE]);
}

PagedSpace* HeapAllocator::old_space() const {
  return static_cast<PagedSpace*>(spaces_[OLD_SPACE]);
}

ReadOnlySpace* HeapAllocator::read_only_space() const {
  return read_only_space_;
}

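// Allocating into the read-only space is only possible while it is still
// writable, i.e. before it has been sealed.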
bool HeapAllocator::CanAllocateInReadOnlySpace() const {
  return read_only_space()->writable();
}

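// Fast-path allocation, templated on the statically known AllocationType.
// Dispatches to the space backing |type|, routes over-sized requests to the
// large object path, and performs post-allocation bookkeeping on success.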
template <AllocationType type>
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult HeapAllocator::AllocateRaw(
    int size_in_bytes, AllocationOrigin origin, AllocationAlignment alignment) {
  DCHECK_EQ(heap_->gc_state(), Heap::NOT_IN_GC);
  DCHECK(AllowHandleAllocation::IsAllowed());
  DCHECK(AllowHeapAllocation::IsAllowed());

  if (FLAG_single_generation && type == AllocationType::kYoung) {
    return AllocateRaw(size_in_bytes, AllocationType::kOld, origin, alignment);
  }

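  // Testing hook: once the allocation timeout expires, report failure so that
  // the caller's retry path triggers a garbage collection.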
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
  if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
    if (!heap_->always_allocate() && allocation_timeout_-- <= 0) {
      return AllocationResult::Failure();
    }
  }
#endif  // V8_ENABLE_ALLOCATION_TIMEOUT

#ifdef DEBUG
  IncrementObjectCounters();
#endif  // DEBUG

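  // Park at a safepoint first if one has been requested, e.g. for a
  // collection started by another thread.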
  if (heap_->CanSafepoint()) {
    heap_->main_thread_local_heap()->Safepoint();
  }

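  // Requests above the regular object size limit for this allocation type are
  // served from the corresponding large object space.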
  const size_t large_object_threshold = heap_->MaxRegularHeapObjectSize(type);
  const bool large_object =
      static_cast<size_t>(size_in_bytes) > large_object_threshold;

  HeapObject object;
  AllocationResult allocation;

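  // With a third-party heap the allocation is delegated wholesale; otherwise
  // dispatch to the space that matches the allocation type.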
  if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
    allocation = heap_->tp_heap_->Allocate(size_in_bytes, type, alignment);
  } else {
    if (V8_UNLIKELY(large_object)) {
      allocation =
          AllocateRawLargeInternal(size_in_bytes, type, origin, alignment);
    } else {
      switch (type) {
        case AllocationType::kYoung:
          allocation =
              new_space()->AllocateRaw(size_in_bytes, alignment, origin);
          break;
        case AllocationType::kOld:
          allocation =
              old_space()->AllocateRaw(size_in_bytes, alignment, origin);
          break;
        case AllocationType::kCode:
          DCHECK_EQ(alignment, AllocationAlignment::kTaggedAligned);
          DCHECK(AllowCodeAllocation::IsAllowed());
          allocation = code_space()->AllocateRawUnaligned(size_in_bytes);
          break;
        case AllocationType::kMap:
          DCHECK_EQ(alignment, AllocationAlignment::kTaggedAligned);
          allocation = space_for_maps()->AllocateRawUnaligned(size_in_bytes);
          break;
        case AllocationType::kReadOnly:
          DCHECK(read_only_space()->writable());
          DCHECK_EQ(AllocationOrigin::kRuntime, origin);
          allocation = read_only_space()->AllocateRaw(size_in_bytes, alignment);
          break;
        case AllocationType::kSharedMap:
          allocation = shared_map_allocator_->AllocateRaw(size_in_bytes,
                                                          alignment, origin);
          break;
        case AllocationType::kSharedOld:
          allocation = shared_old_allocator_->AllocateRaw(size_in_bytes,
                                                          alignment, origin);
          break;
      }
    }
  }

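  // Post-allocation bookkeeping on success: unprotect and zap new code
  // objects (registering them with the chunk's code object registry), record
  // the object start for conservative stack scanning, and notify allocation
  // trackers.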
  if (allocation.To(&object)) {
    if (AllocationType::kCode == type && !V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
      // Unprotect the memory chunk of the object if it was not unprotected
      // already.
      heap_->UnprotectAndRegisterMemoryChunk(
          object, UnprotectMemoryOrigin::kMainThread);
      heap_->ZapCodeObject(object.address(), size_in_bytes);
      if (!large_object) {
        MemoryChunk::FromHeapObject(object)
            ->GetCodeObjectRegistry()
            ->RegisterNewlyAllocatedCodeObject(object.address());
      }
    }

#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
    if (AllocationType::kReadOnly != type) {
      DCHECK_TAG_ALIGNED(object.address());
      Page::FromHeapObject(object)->object_start_bitmap()->SetBit(
          object.address());
    }
#endif  // V8_ENABLE_CONSERVATIVE_STACK_SCANNING

    for (auto& tracker : heap_->allocation_trackers_) {
      tracker->AllocationEvent(object.address(), size_in_bytes);
    }
  }

  return allocation;
}

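// Runtime-dispatched variant: forwards to the templated fast path for the
// dynamically chosen AllocationType.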
AllocationResult HeapAllocator::AllocateRaw(int size_in_bytes,
                                            AllocationType type,
                                            AllocationOrigin origin,
                                            AllocationAlignment alignment) {
  switch (type) {
    case AllocationType::kYoung:
      return AllocateRaw<AllocationType::kYoung>(size_in_bytes, origin,
                                                 alignment);
    case AllocationType::kOld:
      return AllocateRaw<AllocationType::kOld>(size_in_bytes, origin,
                                               alignment);
    case AllocationType::kCode:
      return AllocateRaw<AllocationType::kCode>(size_in_bytes, origin,
                                                alignment);
    case AllocationType::kMap:
      return AllocateRaw<AllocationType::kMap>(size_in_bytes, origin,
                                               alignment);
    case AllocationType::kReadOnly:
      return AllocateRaw<AllocationType::kReadOnly>(size_in_bytes, origin,
                                                    alignment);
    case AllocationType::kSharedMap:
      return AllocateRaw<AllocationType::kSharedMap>(size_in_bytes, origin,
                                                     alignment);
    case AllocationType::kSharedOld:
      return AllocateRaw<AllocationType::kSharedOld>(size_in_bytes, origin,
                                                     alignment);
  }
  UNREACHABLE();
}

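// Like AllocateRaw above, but restricted to data allocations in the young and
// old generations; all other allocation types are disallowed here.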
AllocationResult HeapAllocator::AllocateRawData(int size_in_bytes,
                                                AllocationType type,
                                                AllocationOrigin origin,
                                                AllocationAlignment alignment) {
  switch (type) {
    case AllocationType::kYoung:
      return AllocateRaw<AllocationType::kYoung>(size_in_bytes, origin,
                                                 alignment);
    case AllocationType::kOld:
      return AllocateRaw<AllocationType::kOld>(size_in_bytes, origin,
                                               alignment);
    case AllocationType::kCode:
    case AllocationType::kMap:
    case AllocationType::kReadOnly:
    case AllocationType::kSharedMap:
    case AllocationType::kSharedOld:
      UNREACHABLE();
  }
  UNREACHABLE();
}

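// Allocation with a retry policy: young and old allocations try the inline
// fast path first; anything else, or a fast-path failure, goes to the slow
// path for the chosen retry mode (kLightRetry or kRetryOrFail). Returns a
// null HeapObject if that also fails.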
template <HeapAllocator::AllocationRetryMode mode>
V8_WARN_UNUSED_RESULT V8_INLINE HeapObject HeapAllocator::AllocateRawWith(
    int size, AllocationType allocation, AllocationOrigin origin,
    AllocationAlignment alignment) {
  AllocationResult result;
  HeapObject object;
  if (allocation == AllocationType::kYoung) {
    result = AllocateRaw<AllocationType::kYoung>(size, origin, alignment);
    if (result.To(&object)) {
      return object;
    }
  } else if (allocation == AllocationType::kOld) {
    result = AllocateRaw<AllocationType::kOld>(size, origin, alignment);
    if (result.To(&object)) {
      return object;
    }
  }
  switch (mode) {
    case kLightRetry:
      result = AllocateRawWithLightRetrySlowPath(size, allocation, origin,
                                                 alignment);
      break;
    case kRetryOrFail:
      result = AllocateRawWithRetryOrFailSlowPath(size, allocation, origin,
                                                  alignment);
      break;
  }
  if (result.To(&object)) {
    return object;
  }
  return HeapObject();
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_ALLOCATOR_INL_H_