// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/memory-chunk.h"

#include "src/base/platform/platform.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"

namespace v8 {
namespace internal {

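// Gives the system pages that are fully contained in [addr, addr + size)
// back to the OS; the exact region is computed by
// MemoryAllocator::ComputeDiscardMemoryArea.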
void MemoryChunk::DiscardUnusedMemory(Address addr, size_t size) {
  base::AddressRegion memory_area =
      MemoryAllocator::ComputeDiscardMemoryArea(addr, size);
  if (memory_area.size() != 0) {
    MemoryAllocator* memory_allocator = heap_->memory_allocator();
    v8::PageAllocator* page_allocator =
        memory_allocator->page_allocator(executable());
    CHECK(page_allocator->DiscardSystemPages(
        reinterpret_cast<void*>(memory_area.begin()), memory_area.size()));
  }
}

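// Publishes a fully initialized chunk to other threads: the fence makes all
// preceding initialization stores visible before another thread can observe
// the chunk.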
void MemoryChunk::InitializationMemoryFence() {
  base::SeqCst_MemoryFence();
#ifdef THREAD_SANITIZER
  // Since TSAN does not process memory fences, we use the following annotation
  // to tell TSAN that there is no data race when emitting an
  // InitializationMemoryFence. Note that the other thread still needs to
  // perform MemoryChunk::synchronized_heap().
  base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
                      reinterpret_cast<base::AtomicWord>(heap_));
#endif
}

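// Code pages are kept read+writable while at least one write-unprotect
// request (e.g. a CodeSpaceMemoryModificationScope) is active;
// write_unprotect_counter_ tracks the nesting depth. Once the counter drops
// back to zero, the protection of the code area is restored to |permission|.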
void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
    PageAllocator::Permission permission) {
  DCHECK(permission == PageAllocator::kRead ||
         permission == PageAllocator::kReadExecute);
  DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
  DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
  // Decrementing the write_unprotect_counter_ and changing the page
  // protection mode has to be atomic.
  base::MutexGuard guard(page_protection_change_mutex_);
  if (write_unprotect_counter_ == 0) {
    // This is a corner case that may happen when we have a
    // CodeSpaceMemoryModificationScope open and this page was newly
    // added.
    return;
  }
  write_unprotect_counter_--;
  DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
  if (write_unprotect_counter_ == 0) {
    Address protect_start =
        address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
    size_t page_size = MemoryAllocator::GetCommitPageSize();
    DCHECK(IsAligned(protect_start, page_size));
    size_t protect_size = RoundUp(area_size(), page_size);
    CHECK(reservation_.SetPermissions(protect_start, protect_size,
                                      permission));
  }
}

void MemoryChunk::SetReadable() {
  DecrementWriteUnprotectCounterAndMaybeSetPermissions(PageAllocator::kRead);
}

void MemoryChunk::SetReadAndExecutable() {
  DCHECK(!FLAG_jitless);
  DecrementWriteUnprotectCounterAndMaybeSetPermissions(
      PageAllocator::kReadExecute);
}

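// Counterpart to the function above: the first unprotect request
// (counter 0 -> 1) actually makes the code area read+writable; nested
// requests only increment the counter.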
void MemoryChunk::SetReadAndWritable() {
  DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
  DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
  // Incrementing the write_unprotect_counter_ and changing the page
  // protection mode has to be atomic.
  base::MutexGuard guard(page_protection_change_mutex_);
  write_unprotect_counter_++;
  DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
  if (write_unprotect_counter_ == 1) {
    Address unprotect_start =
        address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
    size_t page_size = MemoryAllocator::GetCommitPageSize();
    DCHECK(IsAligned(unprotect_start, page_size));
    size_t unprotect_size = RoundUp(area_size(), page_size);
    CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
                                      PageAllocator::kReadWrite));
  }
}

namespace {

PageAllocator::Permission DefaultWritableCodePermissions() {
  return FLAG_jitless ? PageAllocator::kReadWrite
                      : PageAllocator::kReadWriteExecute;
}

}  // namespace

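// Completes the initialization of a MemoryChunk on top of an already
// initialized BasicMemoryChunk: remembered sets are cleared, mutexes are
// allocated and, for executable chunks, write protection of the code area is
// set up.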
MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
                                     Executability executable) {
  MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);

  base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->sweeping_slot_set_, nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
                                       nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
                                       nullptr);
  chunk->invalidated_slots_[OLD_TO_NEW] = nullptr;
  chunk->invalidated_slots_[OLD_TO_OLD] = nullptr;
  chunk->progress_bar_ = 0;
  chunk->set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
  chunk->page_protection_change_mutex_ = new base::Mutex();
  chunk->write_unprotect_counter_ = 0;
  chunk->mutex_ = new base::Mutex();
  chunk->young_generation_bitmap_ = nullptr;

  chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
      0;
  chunk->external_backing_store_bytes_
      [ExternalBackingStoreType::kExternalString] = 0;

  chunk->categories_ = nullptr;

  heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
                                                                        0);
  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
    if (heap->write_protect_code_memory()) {
      chunk->write_unprotect_counter_ =
          heap->code_space_memory_modification_scope_depth();
    } else {
      size_t page_size = MemoryAllocator::GetCommitPageSize();
      DCHECK(IsAligned(chunk->area_start(), page_size));
      size_t area_size =
          RoundUp(chunk->area_end() - chunk->area_start(), page_size);
      CHECK(chunk->reservation_.SetPermissions(
          chunk->area_start(), area_size, DefaultWritableCodePermissions()));
    }
  }

  if (chunk->owner()->identity() == CODE_SPACE) {
    chunk->code_object_registry_ = new CodeObjectRegistry();
  } else {
    chunk->code_object_registry_ = nullptr;
  }

  chunk->possibly_empty_buckets_.Initialize();

#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
  chunk->object_start_bitmap_ = ObjectStartBitmap(chunk->area_start());
#endif

#ifdef DEBUG
  ValidateOffsets(chunk);
#endif

  return chunk;
}

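// Without lazy commits (and for the large-object space) the entire
// reservation is backed by physical memory, so the full chunk size is
// reported; otherwise the high water mark approximates the committed part.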
size_t MemoryChunk::CommittedPhysicalMemory() {
  if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE)
    return size();
  return high_water_mark_;
}

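// Pointers *from* old-generation pages are always interesting (old-to-new
// slots must be recorded by the write barrier); pointers *to* them only
// matter while incremental marking is active.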
void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
  if (is_marking) {
    SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
    SetFlag(MemoryChunk::INCREMENTAL_MARKING);
  } else {
    ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
    ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
  }
}

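// Conversely, pointers *to* young-generation pages are always interesting,
// while pointers *from* them only need tracking during incremental marking.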
void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
  SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
    SetFlag(MemoryChunk::INCREMENTAL_MARKING);
  } else {
    ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
    ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
  }
}

// -----------------------------------------------------------------------------
// MemoryChunk implementation

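// Frees the side data structures that only writable chunks carry: mutexes,
// the code object registry, remembered and invalidated slot sets, and the
// free list categories of regular pages.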
void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
  if (mutex_ != nullptr) {
    delete mutex_;
    mutex_ = nullptr;
  }
  if (page_protection_change_mutex_ != nullptr) {
    delete page_protection_change_mutex_;
    page_protection_change_mutex_ = nullptr;
  }
  if (code_object_registry_ != nullptr) {
    delete code_object_registry_;
    code_object_registry_ = nullptr;
  }

  possibly_empty_buckets_.Release();
  ReleaseSlotSet<OLD_TO_NEW>();
  ReleaseSweepingSlotSet();
  ReleaseSlotSet<OLD_TO_OLD>();
  ReleaseTypedSlotSet<OLD_TO_NEW>();
  ReleaseTypedSlotSet<OLD_TO_OLD>();
  ReleaseInvalidatedSlots<OLD_TO_NEW>();
  ReleaseInvalidatedSlots<OLD_TO_OLD>();

  if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();

  if (!IsLargePage()) {
    Page* page = static_cast<Page*>(this);
    page->ReleaseFreeListCategories();
  }
}

void MemoryChunk::ReleaseAllAllocatedMemory() {
  ReleaseAllocatedMemoryNeededForWritableChunk();
}

template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();

template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
  return AllocateSlotSet(&slot_set_[type]);
}

SlotSet* MemoryChunk::AllocateSweepingSlotSet() {
  return AllocateSlotSet(&sweeping_slot_set_);
}

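// Lazily allocates a slot set. Multiple threads may race here; the
// compare-and-swap lets exactly one allocation win, and the losers delete
// their copy and use the published one instead.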
SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
  SlotSet* new_slot_set = SlotSet::Allocate(buckets());
  SlotSet* old_slot_set = base::AsAtomicPointer::AcquireRelease_CompareAndSwap(
      slot_set, nullptr, new_slot_set);
  if (old_slot_set != nullptr) {
    SlotSet::Delete(new_slot_set, buckets());
    new_slot_set = old_slot_set;
  }
  DCHECK(new_slot_set);
  return new_slot_set;
}

template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();

template <RememberedSetType type>
void MemoryChunk::ReleaseSlotSet() {
  ReleaseSlotSet(&slot_set_[type]);
}

void MemoryChunk::ReleaseSweepingSlotSet() {
  ReleaseSlotSet(&sweeping_slot_set_);
}

void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
  if (*slot_set) {
    SlotSet::Delete(*slot_set, buckets());
    *slot_set = nullptr;
  }
}

template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();

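// Typed slot sets use the same lazy, compare-and-swap based allocation
// scheme as the untyped slot sets above.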
template <RememberedSetType type>
TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
  TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
  TypedSlotSet* old_value = base::AsAtomicPointer::Release_CompareAndSwap(
      &typed_slot_set_[type], nullptr, typed_slot_set);
  if (old_value != nullptr) {
    delete typed_slot_set;
    typed_slot_set = old_value;
  }
  DCHECK(typed_slot_set);
  return typed_slot_set;
}

template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();

template <RememberedSetType type>
void MemoryChunk::ReleaseTypedSlotSet() {
  TypedSlotSet* typed_slot_set = typed_slot_set_[type];
  if (typed_slot_set) {
    typed_slot_set_[type] = nullptr;
    delete typed_slot_set;
  }
}

template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_NEW>();
template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_OLD>();

template <RememberedSetType type>
InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
  DCHECK_NULL(invalidated_slots_[type]);
  invalidated_slots_[type] = new InvalidatedSlots();
  return invalidated_slots_[type];
}

template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_NEW>();
template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_OLD>();

template <RememberedSetType type>
void MemoryChunk::ReleaseInvalidatedSlots() {
  if (invalidated_slots_[type]) {
    delete invalidated_slots_[type];
    invalidated_slots_[type] = nullptr;
  }
}

template V8_EXPORT_PRIVATE void
MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
template V8_EXPORT_PRIVATE void
MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);

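// Records |object| as having invalidated slots so that recorded slots inside
// it can be filtered out later. Recording is skipped where slots are not
// tracked at all: young-generation pages for OLD_TO_NEW and pages that skip
// evacuation slot recording for OLD_TO_OLD.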
template <RememberedSetType type>
void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
  bool skip_slot_recording;

  if (type == OLD_TO_NEW) {
    skip_slot_recording = InYoungGeneration();
  } else {
    skip_slot_recording = ShouldSkipEvacuationSlotRecording();
  }

  if (skip_slot_recording) {
    return;
  }

  if (invalidated_slots<type>() == nullptr) {
    AllocateInvalidatedSlots<type>();
  }

  invalidated_slots<type>()->insert(object);
}

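// Invoked when the layout of |object| changes in place; the object is added
// to the relevant invalidated-slot sets so that stale recorded slots inside
// it are ignored.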
void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
  if (V8_DISABLE_WRITE_BARRIERS_BOOL) return;
  if (heap()->incremental_marking()->IsCompacting()) {
    // We cannot check slot_set_[OLD_TO_OLD] here, since the
    // concurrent markers might insert slots concurrently.
    RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
  }

  if (!FLAG_always_promote_young_mc || slot_set_[OLD_TO_NEW] != nullptr)
    RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
}

template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
    HeapObject object);
template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(
    HeapObject object);

template <RememberedSetType type>
bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
  if (invalidated_slots<type>() == nullptr) {
    return false;
  }
  return invalidated_slots<type>()->find(object) !=
         invalidated_slots<type>()->end();
}

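// The young generation marking bitmap is allocated on demand with calloc
// (so it starts out zeroed) and freed again in ReleaseYoungGenerationBitmap.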
void MemoryChunk::AllocateYoungGenerationBitmap() {
  DCHECK_NULL(young_generation_bitmap_);
  young_generation_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
}

void MemoryChunk::ReleaseYoungGenerationBitmap() {
  DCHECK_NOT_NULL(young_generation_bitmap_);
  free(young_generation_bitmap_);
  young_generation_bitmap_ = nullptr;
}

#ifdef DEBUG
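// Verifies that the offsets in MemoryChunkLayout match the actual field
// layout of MemoryChunk; offsetof cannot be used here because MemoryChunk is
// not a POD type.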
void MemoryChunk::ValidateOffsets(MemoryChunk* chunk) {
  // Note that we cannot use offsetof because MemoryChunk is not a POD.
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->slot_set_) - chunk->address(),
            MemoryChunkLayout::kSlotSetOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->progress_bar_) - chunk->address(),
            MemoryChunkLayout::kProgressBarOffset);
  DCHECK_EQ(
      reinterpret_cast<Address>(&chunk->live_byte_count_) - chunk->address(),
      MemoryChunkLayout::kLiveByteCountOffset);
  DCHECK_EQ(
      reinterpret_cast<Address>(&chunk->sweeping_slot_set_) - chunk->address(),
      MemoryChunkLayout::kSweepingSlotSetOffset);
  DCHECK_EQ(
      reinterpret_cast<Address>(&chunk->typed_slot_set_) - chunk->address(),
      MemoryChunkLayout::kTypedSlotSetOffset);
  DCHECK_EQ(
      reinterpret_cast<Address>(&chunk->invalidated_slots_) - chunk->address(),
      MemoryChunkLayout::kInvalidatedSlotsOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->mutex_) - chunk->address(),
            MemoryChunkLayout::kMutexOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->concurrent_sweeping_) -
                chunk->address(),
            MemoryChunkLayout::kConcurrentSweepingOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->page_protection_change_mutex_) -
                chunk->address(),
            MemoryChunkLayout::kPageProtectionChangeMutexOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->write_unprotect_counter_) -
                chunk->address(),
            MemoryChunkLayout::kWriteUnprotectCounterOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->external_backing_store_bytes_) -
                chunk->address(),
            MemoryChunkLayout::kExternalBackingStoreBytesOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->list_node_) - chunk->address(),
            MemoryChunkLayout::kListNodeOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->categories_) - chunk->address(),
            MemoryChunkLayout::kCategoriesOffset);
  DCHECK_EQ(
      reinterpret_cast<Address>(&chunk->young_generation_live_byte_count_) -
          chunk->address(),
      MemoryChunkLayout::kYoungGenerationLiveByteCountOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->young_generation_bitmap_) -
                chunk->address(),
            MemoryChunkLayout::kYoungGenerationBitmapOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->code_object_registry_) -
                chunk->address(),
            MemoryChunkLayout::kCodeObjectRegistryOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->possibly_empty_buckets_) -
                chunk->address(),
            MemoryChunkLayout::kPossiblyEmptyBucketsOffset);
}
#endif

}  // namespace internal
}  // namespace v8