// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/concurrent-allocator.h"

#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/concurrent-allocator-inl.h"
#include "src/heap/heap.h"
#include "src/heap/local-heap-inl.h"
#include "src/heap/local-heap.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/parked-scope.h"

namespace v8 {
namespace internal {

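// Stress task that exercises allocation from a background thread: each
// iteration allocates a small, a medium, and a page-sized old-space object,
// turns every successful allocation into a filler object, and requests a
// collection whenever an allocation fails.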
void StressConcurrentAllocatorTask::RunInternal() {
  Heap* heap = isolate_->heap();
  LocalHeap local_heap(heap, ThreadKind::kBackground);
  UnparkedScope unparked_scope(&local_heap);

  const int kNumIterations = 2000;
  const int kSmallObjectSize = 10 * kTaggedSize;
  const int kMediumObjectSize = 8 * KB;
  const int kLargeObjectSize =
      static_cast<int>(MemoryChunk::kPageSize -
                       MemoryChunkLayout::ObjectStartOffsetInDataPage());

  for (int i = 0; i < kNumIterations; i++) {
    // Isolate tear down started, stop allocation...
    if (heap->gc_state() == Heap::TEAR_DOWN) return;

    AllocationResult result = local_heap.AllocateRaw(
        kSmallObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
        AllocationAlignment::kTaggedAligned);
    if (!result.IsFailure()) {
      heap->CreateFillerObjectAtBackground(
          result.ToAddress(), kSmallObjectSize,
          ClearFreedMemoryMode::kDontClearFreedMemory);
    } else {
      local_heap.TryPerformCollection();
    }

    result = local_heap.AllocateRaw(kMediumObjectSize, AllocationType::kOld,
                                    AllocationOrigin::kRuntime,
                                    AllocationAlignment::kTaggedAligned);
    if (!result.IsFailure()) {
      heap->CreateFillerObjectAtBackground(
          result.ToAddress(), kMediumObjectSize,
          ClearFreedMemoryMode::kDontClearFreedMemory);
    } else {
      local_heap.TryPerformCollection();
    }

    result = local_heap.AllocateRaw(kLargeObjectSize, AllocationType::kOld,
                                    AllocationOrigin::kRuntime,
                                    AllocationAlignment::kTaggedAligned);
    if (!result.IsFailure()) {
      heap->CreateFillerObjectAtBackground(
          result.ToAddress(), kLargeObjectSize,
          ClearFreedMemoryMode::kDontClearFreedMemory);
    } else {
      local_heap.TryPerformCollection();
    }
    local_heap.Safepoint();
  }

  Schedule(isolate_);
}

// static
void StressConcurrentAllocatorTask::Schedule(Isolate* isolate) {
  auto task = std::make_unique<StressConcurrentAllocatorTask>(isolate);
  const double kDelayInSeconds = 0.1;
  V8::GetCurrentPlatform()->CallDelayedOnWorkerThread(std::move(task),
                                                      kDelayInSeconds);
}

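// Closes the current LAB and writes a filler over its unused portion so that
// the heap area stays iterable.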
void ConcurrentAllocator::FreeLinearAllocationArea() {
  // The code page of the linear allocation area needs to be unprotected
  // because we are going to write a filler into that memory area below.
  base::Optional<CodePageMemoryModificationScope> optional_scope;
  if (lab_.IsValid() && space_->identity() == CODE_SPACE) {
    optional_scope.emplace(MemoryChunk::FromAddress(lab_.top()));
  }
  lab_.CloseAndMakeIterable();
}

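// Writes a filler over the unused portion of the LAB but keeps the buffer
// usable for further allocations.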
void ConcurrentAllocator::MakeLinearAllocationAreaIterable() {
  // The code page of the linear allocation area needs to be unprotected
  // because we are going to write a filler into that memory area below.
  base::Optional<CodePageMemoryModificationScope> optional_scope;
  if (lab_.IsValid() && space_->identity() == CODE_SPACE) {
    optional_scope.emplace(MemoryChunk::FromAddress(lab_.top()));
  }
  lab_.MakeIterable();
}

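// Marks the unused part of the LAB ([top, limit)) as a black allocation area
// so that objects allocated from it are treated as live while incremental
// marking is running.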
void ConcurrentAllocator::MarkLinearAllocationAreaBlack() {
  Address top = lab_.top();
  Address limit = lab_.limit();

  if (top != kNullAddress && top != limit) {
    Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
  }
}

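// Removes the black area previously created for the unused part of the LAB.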
void ConcurrentAllocator::UnmarkLinearAllocationArea() {
  Address top = lab_.top();
  Address limit = lab_.limit();

  if (top != kNullAddress && top != limit) {
    Page::FromAllocationAreaAddress(top)->DestroyBlackAreaBackground(top,
                                                                     limit);
  }
}

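// Slow path of allocation within the LAB: refills the LAB from the space and
// retries the aligned allocation, which must then succeed.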
AllocationResult ConcurrentAllocator::AllocateInLabSlow(
    int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
  if (!EnsureLab(origin)) {
    return AllocationResult::Failure();
  }

  AllocationResult allocation = lab_.AllocateRawAligned(object_size, alignment);
  DCHECK(!allocation.IsFailure());

  return allocation;
}

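// Requests a new allocation area of kLabSize..kMaxLabSize bytes from the
// space and installs it as the current LAB, merging it with the previous
// buffer when the two areas happen to be contiguous.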
bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
  auto result = space_->RawRefillLabBackground(
      local_heap_, kLabSize, kMaxLabSize, kTaggedAligned, origin);
  if (!result) return false;

  if (IsBlackAllocationEnabled()) {
    Address top = result->first;
    Address limit = top + result->second;
    Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
  }

  HeapObject object = HeapObject::FromAddress(result->first);
  LocalAllocationBuffer saved_lab = std::move(lab_);
  lab_ = LocalAllocationBuffer::FromResult(
      space_->heap(), AllocationResult::FromObject(object), result->second);
  DCHECK(lab_.IsValid());
  if (!lab_.TryMerge(&saved_lab)) {
    saved_lab.CloseAndMakeIterable();
  }
  return true;
}

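// Allocates object_size bytes directly from the space, bypassing the LAB.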
AllocationResult ConcurrentAllocator::AllocateOutsideLab(
    int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
  auto result = space_->RawRefillLabBackground(local_heap_, object_size,
                                               object_size, alignment, origin);
  if (!result) return AllocationResult::Failure();

  HeapObject object = HeapObject::FromAddress(result->first);

  if (IsBlackAllocationEnabled()) {
    owning_heap()->incremental_marking()->MarkBlackBackground(object,
                                                              object_size);
  }

  return AllocationResult::FromObject(object);
}

bool ConcurrentAllocator::IsBlackAllocationEnabled() const {
  return owning_heap()->incremental_marking()->black_allocation();
}

Heap* ConcurrentAllocator::owning_heap() const { return space_->heap(); }

}  // namespace internal
}  // namespace v8