// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_STORE_BUFFER_H_
#define V8_STORE_BUFFER_H_

#include "src/allocation.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/cancelable-task.h"
#include "src/globals.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"

namespace v8 {
namespace internal {

// Intermediate buffer that accumulates old-to-new stores from the generated
// code. Moreover, it stores invalid old-to-new slots with two entries.
// The first is a tagged address of the start of the invalid range, the second
// one is the end address of the invalid range or null if there is just one slot
// that needs to be removed from the remembered set. On buffer overflow the
// slots are moved to the remembered set.
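// For example, deleting the single slot at address addr is recorded as the
// two words (addr | kDeletionTag) and nullptr (see
// InsertDeletionIntoStoreBuffer below).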
class StoreBuffer {
 public:
  enum StoreBufferMode { IN_GC, NOT_IN_GC };

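  // Each store buffer holds kStoreBufferSize bytes, i.e. 1 << 11 pointer-sized
  // entries. There are kStoreBuffers of them so that one buffer can be
  // processed while the other keeps accepting entries.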
  static const int kStoreBufferSize = 1 << (11 + kPointerSizeLog2);
  static const int kStoreBufferMask = kStoreBufferSize - 1;
  static const int kStoreBuffers = 2;
  static const intptr_t kDeletionTag = 1;

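  // Called when the current store buffer runs out of space. On overflow the
  // buffered slots are moved to the remembered set (see the class comment and
  // the callers InsertIntoStoreBuffer and InsertDeletionIntoStoreBuffer).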
  V8_EXPORT_PRIVATE static void StoreBufferOverflow(Isolate* isolate);

  explicit StoreBuffer(Heap* heap);
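  // SetUp reserves the memory backing the two store buffers (see
  // virtual_memory_); TearDown releases it again.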
  void SetUp();
  void TearDown();

  // Used to add entries from generated code.
  inline Address* top_address() { return reinterpret_cast<Address*>(&top_); }

  // Moves entries from a specific store buffer to the remembered set. This
  // method takes a lock.
  void MoveEntriesToRememberedSet(int index);

  // This method ensures that all used store buffer entries are transferred to
  // the remembered set.
  void MoveAllEntriesToRememberedSet();

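  // A deletion entry is recognized by kDeletionTag being set in the low bit
  // of its first word; slot addresses are word-aligned, so that bit is
  // otherwise unused.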
  inline bool IsDeletionAddress(Address address) const {
    return reinterpret_cast<intptr_t>(address) & kDeletionTag;
  }

  inline Address MarkDeletionAddress(Address address) {
    return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) |
                                     kDeletionTag);
  }

  inline Address UnmarkDeletionAddress(Address address) {
    return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) &
                                     ~kDeletionTag);
  }

  // If we only want to delete a single slot, end should be set to null which
  // will be written into the second field. When processing the store buffer
  // the more efficient Remove method will be called in this case.
  void DeleteEntry(Address start, Address end = nullptr) {
    // Deletions coming from the GC are directly deleted from the remembered
    // set. Deletions coming from the runtime are added to the store buffer
    // to allow concurrent processing.
    deletion_callback(this, start, end);
  }

  static void DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
                                            Address start, Address end) {
    // In GC the store buffer has to be empty at all times.
    DCHECK(store_buffer->Empty());
    DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
    Page* page = Page::FromAddress(start);
    if (end) {
      RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
                                             SlotSet::PREFREE_EMPTY_BUCKETS);
    } else {
      RememberedSet<OLD_TO_NEW>::Remove(page, start);
    }
  }

  static void DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
                                  Address end) {
    DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
    store_buffer->InsertDeletionIntoStoreBuffer(start, end);
  }

  void InsertDeletionIntoStoreBuffer(Address start, Address end) {
    if (top_ + sizeof(Address) * 2 > limit_[current_]) {
      StoreBufferOverflow(heap_->isolate());
    }
    *top_ = MarkDeletionAddress(start);
    top_++;
    *top_ = end;
    top_++;
  }

  static void InsertDuringGarbageCollection(StoreBuffer* store_buffer,
                                            Address slot) {
    DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
    RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
  }

  static void InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
    DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
    store_buffer->InsertIntoStoreBuffer(slot);
  }

  void InsertIntoStoreBuffer(Address slot) {
    if (top_ + sizeof(Address) > limit_[current_]) {
      StoreBufferOverflow(heap_->isolate());
    }
    *top_ = slot;
    top_++;
  }

  void InsertEntry(Address slot) {
    // Insertions coming from the GC are directly inserted into the remembered
    // set. Insertions coming from the runtime are added to the store buffer to
    // allow concurrent processing.
    insertion_callback(this, slot);
  }

  void SetMode(StoreBufferMode mode) {
    mode_ = mode;
    if (mode == NOT_IN_GC) {
      insertion_callback = &InsertDuringRuntime;
      deletion_callback = &DeleteDuringRuntime;
    } else {
      insertion_callback = &InsertDuringGarbageCollection;
      deletion_callback = &DeleteDuringGarbageCollection;
    }
  }

  // Used by the concurrent processing thread to transfer entries from the
  // store buffer to the remembered set.
  void ConcurrentlyProcessStoreBuffer();

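  // Returns true if there are no pending entries: neither buffer has a
  // published lazy_top_ and the current buffer is empty.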
  bool Empty() {
    for (int i = 0; i < kStoreBuffers; i++) {
      if (lazy_top_[i]) {
        return false;
      }
    }
    return top_ == start_[current_];
  }

  Heap* heap() { return heap_; }

 private:
  // There are two store buffers. If one store buffer fills up, the main thread
  // publishes the top pointer of the store buffer that needs processing in its
  // global lazy_top_ field. After that it starts the concurrent processing
  // thread. The concurrent processing thread uses the pointer in lazy_top_.
  // It will grab the given mutex and transfer its entries to the remembered
  // set. If the concurrent thread does not make progress, the main thread will
  // perform the work.
  // Important: there is an ordering constraint. The store buffer with the
  // older entries has to be processed first.
  class Task : public CancelableTask {
   public:
    Task(Isolate* isolate, StoreBuffer* store_buffer)
        : CancelableTask(isolate), store_buffer_(store_buffer) {}
    virtual ~Task() {}

   private:
    void RunInternal() override {
      store_buffer_->ConcurrentlyProcessStoreBuffer();
    }
    StoreBuffer* store_buffer_;
    DISALLOW_COPY_AND_ASSIGN(Task);
  };

  StoreBufferMode mode() const { return mode_; }

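  // Makes the other store buffer the current one and publishes the full
  // buffer's top pointer in lazy_top_ so it can be processed concurrently
  // (see the comment above).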
  void FlipStoreBuffers();

  Heap* heap_;

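  // Points to the next free slot in the current store buffer.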
  Address* top_;

  // The start and the limit of the buffer that contains store slots
  // added from the generated code. We have two chunks of store buffers.
  // Whenever one fills up, we notify a concurrent processing thread and
  // use the other empty one in the meantime.
  Address* start_[kStoreBuffers];
  Address* limit_[kStoreBuffers];

  // At most one lazy_top_ pointer is set at any time.
  Address* lazy_top_[kStoreBuffers];
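  // Guards the transfer of store buffer entries to the remembered set (taken
  // by MoveEntriesToRememberedSet and the concurrent processing thread).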
  base::Mutex mutex_;

  // We only want to have at most one concurrent processing task running.
  bool task_running_;

  // Points to the current buffer in use.
  int current_;

  // During GC, entries are directly added to the remembered set without
  // going through the store buffer. This is signaled by a special
  // IN_GC mode.
  StoreBufferMode mode_;

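  // Reservation that backs the memory of the two store buffers.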
  base::VirtualMemory* virtual_memory_;

  // Callbacks are more efficient than reading out the gc state for every
  // store buffer operation.
  void (*insertion_callback)(StoreBuffer*, Address);
  void (*deletion_callback)(StoreBuffer*, Address, Address);
};

}  // namespace internal
}  // namespace v8

#endif  // V8_STORE_BUFFER_H_