// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/memory-chunk.h"

#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
#include "src/common/globals.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"

namespace v8 {
namespace internal {

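// Hands back physical pages in [addr, addr + size) to the OS.
// ComputeDiscardMemoryArea presumably trims the range to whole commit pages;
// if no full page remains, the region is empty and nothing is discarded.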
void MemoryChunk::DiscardUnusedMemory(Address addr, size_t size) {
  base::AddressRegion memory_area =
      MemoryAllocator::ComputeDiscardMemoryArea(addr, size);
  if (memory_area.size() != 0) {
    MemoryAllocator* memory_allocator = heap_->memory_allocator();
    v8::PageAllocator* page_allocator =
        memory_allocator->page_allocator(executable());
    CHECK(page_allocator->DiscardSystemPages(
        reinterpret_cast<void*>(memory_area.begin()), memory_area.size()));
  }
}

void MemoryChunk::InitializationMemoryFence() {
  base::SeqCst_MemoryFence();
#ifdef THREAD_SANITIZER
  // Since TSAN does not process memory fences, we use the following annotation
  // to tell TSAN that there is no data race when emitting an
  // InitializationMemoryFence. Note that the other thread still needs to
  // perform MemoryChunk::synchronized_heap().
  base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
                      reinterpret_cast<base::AtomicWord>(heap_));
#endif
}

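// Shared helper for SetReadable()/SetReadAndExecutable() below.
// write_unprotect_counter_ counts outstanding writers (see the
// CodeSpaceMemoryModificationScope references in the comments); the page is
// only re-protected to `permission` once the count drops back to zero.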
void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
    PageAllocator::Permission permission) {
  DCHECK(permission == PageAllocator::kRead ||
         permission == PageAllocator::kReadExecute);
  DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
  DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
  // Decrementing the write_unprotect_counter_ and changing the page
  // protection mode have to be atomic.
  base::MutexGuard guard(page_protection_change_mutex_);
  if (write_unprotect_counter_ == 0) {
    // This is a corner case that may happen when we have a
    // CodeSpaceMemoryModificationScope open and this page was newly
    // added.
    return;
  }
  write_unprotect_counter_--;
  DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
  if (write_unprotect_counter_ == 0) {
    Address protect_start =
        address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
    size_t page_size = MemoryAllocator::GetCommitPageSize();
    DCHECK(IsAligned(protect_start, page_size));
    size_t protect_size = RoundUp(area_size(), page_size);
    CHECK(reservation_.SetPermissions(protect_start, protect_size, permission));
  }
}

void MemoryChunk::SetReadable() {
  DecrementWriteUnprotectCounterAndMaybeSetPermissions(PageAllocator::kRead);
}

void MemoryChunk::SetReadAndExecutable() {
  DCHECK(!FLAG_jitless);
  DecrementWriteUnprotectCounterAndMaybeSetPermissions(
      PageAllocator::kReadExecute);
}

void MemoryChunk::SetCodeModificationPermissions() {
  DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
  DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
  // Incrementing the write_unprotect_counter_ and changing the page
  // protection mode have to be atomic.
  base::MutexGuard guard(page_protection_change_mutex_);
  write_unprotect_counter_++;
  DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
  if (write_unprotect_counter_ == 1) {
    Address unprotect_start =
        address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
    size_t page_size = MemoryAllocator::GetCommitPageSize();
    DCHECK(IsAligned(unprotect_start, page_size));
    size_t unprotect_size = RoundUp(area_size(), page_size);
    // We may use RWX pages to write code. Some CPUs have optimisations to push
    // updates to code to the icache through a fast path, and they may filter
    // updates based on the written memory being executable.
    CHECK(reservation_.SetPermissions(
        unprotect_start, unprotect_size,
        MemoryChunk::GetCodeModificationPermission()));
  }
}

void MemoryChunk::SetDefaultCodePermissions() {
  if (FLAG_jitless) {
    SetReadable();
  } else {
    SetReadAndExecutable();
  }
}

namespace {

PageAllocator::Permission DefaultWritableCodePermissions() {
  return FLAG_jitless ? PageAllocator::kReadWrite
                      : PageAllocator::kReadWriteExecute;
}

}  // namespace

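// The constructor wires up everything a writable chunk needs: empty remembered
// sets (allocated lazily later), the mutexes guarding sweeping and page
// protection changes, and, for executable chunks, either the write-unprotect
// counter (when code write protection is enabled) or immediately writable
// permissions via DefaultWritableCodePermissions().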
MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
                         Address area_start, Address area_end,
                         VirtualMemory reservation, Executability executable,
                         PageSize page_size)
    : BasicMemoryChunk(heap, space, chunk_size, area_start, area_end,
                       std::move(reservation)) {
  base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_NEW], nullptr);
  base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_OLD], nullptr);
  base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_SHARED], nullptr);
  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
    base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_CODE], nullptr);
  }
  base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_NEW], nullptr);
  base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_OLD], nullptr);
  base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_SHARED],
                                       nullptr);
  invalidated_slots_[OLD_TO_NEW] = nullptr;
  invalidated_slots_[OLD_TO_OLD] = nullptr;
  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
    // Not actually used but initialize anyway for predictability.
    invalidated_slots_[OLD_TO_CODE] = nullptr;
  }
  progress_bar_.Initialize();
  set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
  page_protection_change_mutex_ = new base::Mutex();
  write_unprotect_counter_ = 0;
  mutex_ = new base::Mutex();
  young_generation_bitmap_ = nullptr;

  external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
  external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] = 0;

  categories_ = nullptr;

  heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(this,
                                                                        0);
  if (executable == EXECUTABLE) {
    SetFlag(IS_EXECUTABLE);
    if (heap->write_protect_code_memory()) {
      write_unprotect_counter_ =
          heap->code_space_memory_modification_scope_depth();
    } else {
      size_t page_size = MemoryAllocator::GetCommitPageSize();
      DCHECK(IsAligned(area_start_, page_size));
      size_t area_size = RoundUp(area_end_ - area_start_, page_size);
      CHECK(reservation_.SetPermissions(area_start_, area_size,
                                        DefaultWritableCodePermissions()));
    }
  }

  if (owner()->identity() == CODE_SPACE) {
    code_object_registry_ = new CodeObjectRegistry();
  } else {
    code_object_registry_ = nullptr;
  }

  possibly_empty_buckets_.Initialize();

  if (page_size == PageSize::kRegular) {
    active_system_pages_.Init(MemoryChunkLayout::kMemoryChunkHeaderSize,
                              MemoryAllocator::GetCommitPageSizeBits(), size());
  } else {
    // We do not track active system pages for large pages.
    active_system_pages_.Clear();
  }

  // All pages of a shared heap need to be marked with this flag.
  if (heap->IsShared()) SetFlag(MemoryChunk::IN_SHARED_HEAP);

#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
  object_start_bitmap_ = ObjectStartBitmap(area_start());
#endif

#ifdef DEBUG
  ValidateOffsets(this);
#endif
}

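// Without lazy commits (or for large pages) every committed byte is assumed to
// be backed by physical memory, so the full chunk size is reported; otherwise
// only the tracked active system pages are counted.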
size_t MemoryChunk::CommittedPhysicalMemory() const {
  if (!base::OS::HasLazyCommits() || IsLargePage()) return size();
  return active_system_pages_.Size(MemoryAllocator::GetCommitPageSizeBits());
}

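// The *_ARE_INTERESTING flags steer the write barrier. As read from the flag
// updates below: old-generation pages always record outgoing pointers, and
// additionally record incoming pointers while incremental marking is active;
// young-generation pages are always an interesting target, and their outgoing
// pointers only matter during marking.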
void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
  if (is_marking) {
    SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
    SetFlag(MemoryChunk::INCREMENTAL_MARKING);
  } else {
    ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
    ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
  }
}

void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
  SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
    SetFlag(MemoryChunk::INCREMENTAL_MARKING);
  } else {
    ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
    ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
  }
}
// -----------------------------------------------------------------------------
// MemoryChunk implementation

void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
  if (mutex_ != nullptr) {
    delete mutex_;
    mutex_ = nullptr;
  }
  if (page_protection_change_mutex_ != nullptr) {
    delete page_protection_change_mutex_;
    page_protection_change_mutex_ = nullptr;
  }
  if (code_object_registry_ != nullptr) {
    delete code_object_registry_;
    code_object_registry_ = nullptr;
  }

  possibly_empty_buckets_.Release();
  ReleaseSlotSet<OLD_TO_NEW>();
  ReleaseSlotSet<OLD_TO_OLD>();
  if (V8_EXTERNAL_CODE_SPACE_BOOL) ReleaseSlotSet<OLD_TO_CODE>();
  ReleaseTypedSlotSet<OLD_TO_NEW>();
  ReleaseTypedSlotSet<OLD_TO_OLD>();
  ReleaseInvalidatedSlots<OLD_TO_NEW>();
  ReleaseInvalidatedSlots<OLD_TO_OLD>();

  if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();

  if (!IsLargePage()) {
    Page* page = static_cast<Page*>(this);
    page->ReleaseFreeListCategories();
  }
}

void MemoryChunk::ReleaseAllAllocatedMemory() {
  ReleaseAllocatedMemoryNeededForWritableChunk();
}

template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
template V8_EXPORT_PRIVATE SlotSet*
MemoryChunk::AllocateSlotSet<OLD_TO_SHARED>();
#ifdef V8_EXTERNAL_CODE_SPACE
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_CODE>();
#endif  // V8_EXTERNAL_CODE_SPACE

template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
  return AllocateSlotSet(&slot_set_[type]);
}

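// Several threads may try to materialize the same slot set concurrently. The
// acquire-release compare-and-swap installs the new set only if the slot is
// still nullptr; a losing thread frees its freshly allocated set and adopts
// the winner's, so callers always see a single shared instance.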
SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
  SlotSet* new_slot_set = SlotSet::Allocate(buckets());
  SlotSet* old_slot_set = base::AsAtomicPointer::AcquireRelease_CompareAndSwap(
      slot_set, nullptr, new_slot_set);
  if (old_slot_set != nullptr) {
    SlotSet::Delete(new_slot_set, buckets());
    new_slot_set = old_slot_set;
  }
  DCHECK(new_slot_set);
  return new_slot_set;
}

template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
template void MemoryChunk::ReleaseSlotSet<OLD_TO_SHARED>();
#ifdef V8_EXTERNAL_CODE_SPACE
template void MemoryChunk::ReleaseSlotSet<OLD_TO_CODE>();
#endif  // V8_EXTERNAL_CODE_SPACE

template <RememberedSetType type>
void MemoryChunk::ReleaseSlotSet() {
  ReleaseSlotSet(&slot_set_[type]);
}

void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
  if (*slot_set) {
    SlotSet::Delete(*slot_set, buckets());
    *slot_set = nullptr;
  }
}

template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_SHARED>();

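// Same lazy-allocation pattern as AllocateSlotSet() above, just with a plain
// release compare-and-swap and `delete` for the losing thread's set.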
template <RememberedSetType type>
TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
  TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
  TypedSlotSet* old_value = base::AsAtomicPointer::Release_CompareAndSwap(
      &typed_slot_set_[type], nullptr, typed_slot_set);
  if (old_value != nullptr) {
    delete typed_slot_set;
    typed_slot_set = old_value;
  }
  DCHECK(typed_slot_set);
  return typed_slot_set;
}

template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_SHARED>();

template <RememberedSetType type>
void MemoryChunk::ReleaseTypedSlotSet() {
  TypedSlotSet* typed_slot_set = typed_slot_set_[type];
  if (typed_slot_set) {
    typed_slot_set_[type] = nullptr;
    delete typed_slot_set;
  }
}

template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_NEW>();
template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_OLD>();

template <RememberedSetType type>
InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
  DCHECK_NULL(invalidated_slots_[type]);
  invalidated_slots_[type] = new InvalidatedSlots();
  return invalidated_slots_[type];
}

template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_NEW>();
template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_OLD>();

template <RememberedSetType type>
void MemoryChunk::ReleaseInvalidatedSlots() {
  if (invalidated_slots_[type]) {
    delete invalidated_slots_[type];
    invalidated_slots_[type] = nullptr;
  }
}

template V8_EXPORT_PRIVATE void
MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
template V8_EXPORT_PRIVATE void
MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);

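// Records `object` as having had its layout changed, so that previously
// recorded slots inside it are treated as potentially stale. Recording is
// skipped where it cannot matter: OLD_TO_NEW slots on young-generation pages,
// and pages that skip evacuation slot recording altogether.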
template <RememberedSetType type>
void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
  bool skip_slot_recording;

  if (type == OLD_TO_NEW) {
    skip_slot_recording = InYoungGeneration();
  } else {
    skip_slot_recording = ShouldSkipEvacuationSlotRecording();
  }

  if (skip_slot_recording) {
    return;
  }

  if (invalidated_slots<type>() == nullptr) {
    AllocateInvalidatedSlots<type>();
  }

  invalidated_slots<type>()->insert(object);
}

void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
  if (V8_DISABLE_WRITE_BARRIERS_BOOL) return;
  if (heap()->incremental_marking()->IsCompacting()) {
    // We cannot check slot_set_[OLD_TO_OLD] here, since the
    // concurrent markers might insert slots concurrently.
    RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
  }

  if (slot_set_[OLD_TO_NEW] != nullptr)
    RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
}

template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
    HeapObject object);
template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(
    HeapObject object);

template <RememberedSetType type>
bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
  if (invalidated_slots<type>() == nullptr) {
    return false;
  }
  return invalidated_slots<type>()->find(object) !=
         invalidated_slots<type>()->end();
}

void MemoryChunk::AllocateYoungGenerationBitmap() {
  DCHECK_NULL(young_generation_bitmap_);
  young_generation_bitmap_ =
      static_cast<Bitmap*>(base::Calloc(1, Bitmap::kSize));
}

void MemoryChunk::ReleaseYoungGenerationBitmap() {
  DCHECK_NOT_NULL(young_generation_bitmap_);
  base::Free(young_generation_bitmap_);
  young_generation_bitmap_ = nullptr;
}

#ifdef DEBUG
void MemoryChunk::ValidateOffsets(MemoryChunk* chunk) {
  // Note that we cannot use offsetof because MemoryChunk is not a POD.
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->slot_set_) - chunk->address(),
            MemoryChunkLayout::kSlotSetOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->progress_bar_) - chunk->address(),
            MemoryChunkLayout::kProgressBarOffset);
  DCHECK_EQ(
      reinterpret_cast<Address>(&chunk->live_byte_count_) - chunk->address(),
      MemoryChunkLayout::kLiveByteCountOffset);
  DCHECK_EQ(
      reinterpret_cast<Address>(&chunk->typed_slot_set_) - chunk->address(),
      MemoryChunkLayout::kTypedSlotSetOffset);
  DCHECK_EQ(
      reinterpret_cast<Address>(&chunk->invalidated_slots_) - chunk->address(),
      MemoryChunkLayout::kInvalidatedSlotsOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->mutex_) - chunk->address(),
            MemoryChunkLayout::kMutexOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->concurrent_sweeping_) -
                chunk->address(),
            MemoryChunkLayout::kConcurrentSweepingOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->page_protection_change_mutex_) -
                chunk->address(),
            MemoryChunkLayout::kPageProtectionChangeMutexOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->write_unprotect_counter_) -
                chunk->address(),
            MemoryChunkLayout::kWriteUnprotectCounterOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->external_backing_store_bytes_) -
                chunk->address(),
            MemoryChunkLayout::kExternalBackingStoreBytesOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->list_node_) - chunk->address(),
            MemoryChunkLayout::kListNodeOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->categories_) - chunk->address(),
            MemoryChunkLayout::kCategoriesOffset);
  DCHECK_EQ(
      reinterpret_cast<Address>(&chunk->young_generation_live_byte_count_) -
          chunk->address(),
      MemoryChunkLayout::kYoungGenerationLiveByteCountOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->young_generation_bitmap_) -
                chunk->address(),
            MemoryChunkLayout::kYoungGenerationBitmapOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->code_object_registry_) -
                chunk->address(),
            MemoryChunkLayout::kCodeObjectRegistryOffset);
  DCHECK_EQ(reinterpret_cast<Address>(&chunk->possibly_empty_buckets_) -
                chunk->address(),
            MemoryChunkLayout::kPossiblyEmptyBucketsOffset);
}
#endif

}  // namespace internal
}  // namespace v8