// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/store-buffer.h"

#include <algorithm>

#include "src/base/macros.h"
#include "src/base/template-utils.h"
#include "src/counters.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/store-buffer-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

StoreBuffer::StoreBuffer(Heap* heap)
    : heap_(heap), top_(nullptr), current_(0), mode_(NOT_IN_GC) {
  for (int i = 0; i < kStoreBuffers; i++) {
    start_[i] = nullptr;
    limit_[i] = nullptr;
    lazy_top_[i] = nullptr;
  }
  task_running_ = false;
  insertion_callback = &InsertDuringRuntime;
  deletion_callback = &DeleteDuringRuntime;
}

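// Reserves one contiguous, kStoreBufferSize-aligned region and carves it into
// the kStoreBuffers segments used for double buffering.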
void StoreBuffer::SetUp() {
  const size_t requested_size = kStoreBufferSize * kStoreBuffers;
  // Allocate buffer memory aligned at least to kStoreBufferSize. This lets us
  // use a bit test to detect the ends of the buffers.
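  // In particular, (limit_[i] & kStoreBufferMask) == 0 for every buffer, as
  // the DCHECK below verifies.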
  const size_t alignment =
      std::max<size_t>(kStoreBufferSize, AllocatePageSize());
  void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
  VirtualMemory reservation;
  if (!AlignedAllocVirtualMemory(requested_size, alignment, hint,
                                 &reservation)) {
    heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
  }

  Address start = reservation.address();
  const size_t allocated_size = reservation.size();

  start_[0] = reinterpret_cast<Address*>(start);
  limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize);
  start_[1] = limit_[0];
  limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize);

  // Sanity check the buffers.
  Address* vm_limit = reinterpret_cast<Address*>(start + allocated_size);
  USE(vm_limit);
  for (int i = 0; i < kStoreBuffers; i++) {
    DCHECK(reinterpret_cast<Address>(start_[i]) >= reservation.address());
    DCHECK(reinterpret_cast<Address>(limit_[i]) >= reservation.address());
    DCHECK(start_[i] <= vm_limit);
    DCHECK(limit_[i] <= vm_limit);
    DCHECK_EQ(0, reinterpret_cast<Address>(limit_[i]) & kStoreBufferMask);
  }

  // Set RW permissions only on the pages we use.
  const size_t used_size = RoundUp(requested_size, CommitPageSize());
  if (!reservation.SetPermissions(start, used_size,
                                  PageAllocator::kReadWrite)) {
    heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
  }
  current_ = 0;
  top_ = start_[current_];
  virtual_memory_.TakeControl(&reservation);
}

void StoreBuffer::TearDown() {
  if (virtual_memory_.IsReserved()) virtual_memory_.Free();
  top_ = nullptr;
  for (int i = 0; i < kStoreBuffers; i++) {
    start_[i] = nullptr;
    limit_[i] = nullptr;
    lazy_top_[i] = nullptr;
  }
}

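// Runtime-mode callbacks: while no GC is in progress, deletions and insertions
// are simply accumulated in the store buffer itself.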
void StoreBuffer::DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
                                      Address end) {
  DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
  store_buffer->InsertDeletionIntoStoreBuffer(start, end);
}

void StoreBuffer::InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
  DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
  store_buffer->InsertIntoStoreBuffer(slot);
}

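// GC-mode callbacks: during GC the store buffer stays empty and updates are
// applied directly to the OLD_TO_NEW remembered set.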
void StoreBuffer::DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
                                                Address start, Address end) {
  // During GC the store buffer has to be empty at all times.
  DCHECK(store_buffer->Empty());
  DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
  Page* page = Page::FromAddress(start);
  if (end) {
    RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
                                           SlotSet::PREFREE_EMPTY_BUCKETS);
  } else {
    RememberedSet<OLD_TO_NEW>::Remove(page, start);
  }
}

void StoreBuffer::InsertDuringGarbageCollection(StoreBuffer* store_buffer,
                                                Address slot) {
  DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
  RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
}

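// Switches between the runtime and GC callback pairs above.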
void StoreBuffer::SetMode(StoreBufferMode mode) {
  mode_ = mode;
  if (mode == NOT_IN_GC) {
    insertion_callback = &InsertDuringRuntime;
    deletion_callback = &DeleteDuringRuntime;
  } else {
    insertion_callback = &InsertDuringGarbageCollection;
    deletion_callback = &DeleteDuringGarbageCollection;
  }
}

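// Write barrier slow path: the active buffer is full, so swap to the other one.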
int StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
  isolate->heap()->store_buffer()->FlipStoreBuffers();
  isolate->counters()->store_buffer_overflows()->Increment();
  // Called by RecordWriteCodeStubAssembler, which doesn't accept a void return
  // type.
  return 0;
}

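// Swaps the active and inactive buffers. The inactive buffer is drained first
// (if it still holds entries), and the buffer that just filled up is left for
// the concurrent task (or a later flip) to process.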
void StoreBuffer::FlipStoreBuffers() {
  base::LockGuard<base::Mutex> guard(&mutex_);
  int other = (current_ + 1) % kStoreBuffers;
  MoveEntriesToRememberedSet(other);
  lazy_top_[current_] = top_;
  current_ = other;
  top_ = start_[current_];

  if (!task_running_ && FLAG_concurrent_store_buffer) {
    task_running_ = true;
    V8::GetCurrentPlatform()->CallOnWorkerThread(
        base::make_unique<Task>(heap_->isolate(), this));
  }
}

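// Drains the buffer at |index| into the OLD_TO_NEW remembered set. A deletion
// is encoded as a marked start address followed by a (possibly null) end
// address in the next slot.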
void StoreBuffer::MoveEntriesToRememberedSet(int index) {
  if (!lazy_top_[index]) return;
  DCHECK_GE(index, 0);
  DCHECK_LT(index, kStoreBuffers);
  Address last_inserted_addr = kNullAddress;

  // We are taking the chunk map mutex here because the page lookup of addr
  // below may require us to check if addr is part of a large page.
  base::LockGuard<base::Mutex> guard(heap_->lo_space()->chunk_map_mutex());
  for (Address* current = start_[index]; current < lazy_top_[index];
       current++) {
    Address addr = *current;
    MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
    if (IsDeletionAddress(addr)) {
      last_inserted_addr = kNullAddress;
      current++;
      Address end = *current;
      DCHECK(!IsDeletionAddress(end));
      addr = UnmarkDeletionAddress(addr);
      if (end) {
        RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, addr, end,
                                               SlotSet::PREFREE_EMPTY_BUCKETS);
      } else {
        RememberedSet<OLD_TO_NEW>::Remove(chunk, addr);
      }
    } else {
      DCHECK(!IsDeletionAddress(addr));
      if (addr != last_inserted_addr) {
        RememberedSet<OLD_TO_NEW>::Insert(chunk, addr);
        last_inserted_addr = addr;
      }
    }
  }
  lazy_top_[index] = nullptr;
}

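// Drains both buffers so that no entries remain in the store buffer.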
void StoreBuffer::MoveAllEntriesToRememberedSet() {
  base::LockGuard<base::Mutex> guard(&mutex_);
  int other = (current_ + 1) % kStoreBuffers;
  MoveEntriesToRememberedSet(other);
  lazy_top_[current_] = top_;
  MoveEntriesToRememberedSet(current_);
  top_ = start_[current_];
}

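// Drains the inactive buffer; intended to run from the background task posted
// in FlipStoreBuffers. Clears task_running_ so a new task can be scheduled.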
void StoreBuffer::ConcurrentlyProcessStoreBuffer() {
  base::LockGuard<base::Mutex> guard(&mutex_);
  int other = (current_ + 1) % kStoreBuffers;
  MoveEntriesToRememberedSet(other);
  task_running_ = false;
}

}  // namespace internal
}  // namespace v8