// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_STORE_BUFFER_H_
#define V8_STORE_BUFFER_H_

#include "src/allocation.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/globals.h"

namespace v8 {
namespace internal {

class Page;
class PagedSpace;
class StoreBuffer;

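// Invoked for each recorded slot: |from| is the slot that held a pointer into
// new space, and |to| is the object it points at.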
typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);

// Used to implement the write barrier by collecting addresses of pointers
// between spaces.
class StoreBuffer {
 public:
  explicit StoreBuffer(Heap* heap);

  static void StoreBufferOverflow(Isolate* isolate);

  void SetUp();
  void TearDown();

  // This is used to add addresses to the store buffer non-concurrently.
  inline void Mark(Address addr);

  // This is used to add addresses to the store buffer when multiple threads
  // may operate on the store buffer.
  inline void MarkSynchronized(Address addr);
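  // A minimal usage sketch (hypothetical callers, not part of this header;
  // MarkSynchronized presumably serializes on |mutex_| declared below):
  //
  //   void RecordSlot(StoreBuffer* buffer, Address slot) {
  //     buffer->Mark(slot);              // single mutator thread
  //   }
  //   void RecordSlotConcurrently(StoreBuffer* buffer, Address slot) {
  //     buffer->MarkSynchronized(slot);  // safe with concurrent callers
  //   }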

  // This is used by the heap traversal to enter the addresses into the store
  // buffer that should still be in the store buffer after GC.  It enters
  // addresses directly into the old buffer because the GC starts by wiping the
  // old buffer and thereafter only visits each cell once, so there is no need
  // to attempt to remove any dupes.  During the first part of a GC we
  // are using the store buffer to access the old spaces and at the same time
  // we are rebuilding the store buffer using this function.  There is, however,
  // no issue of overwriting the buffer we are iterating over, because this
  // stage of the scavenge can only reduce the number of addresses in the store
  // buffer (some objects are promoted so pointers to them do not need to be in
  // the store buffer).  The later parts of the GC scan the pages that are
  // exempt from the store buffer and process the promotion queue.  These steps
  // can overflow this buffer.  We check for this, and on overflow we call the
  // callback set up with the StoreBufferRebuildScope object.
  inline void EnterDirectlyIntoStoreBuffer(Address addr);

  // Iterates over all pointers that go from old space to new space.  It will
  // delete the store buffer as it starts so the callback should reenter
  // surviving old-to-new pointers into the store buffer to rebuild it.
  void IteratePointersToNewSpace(ObjectSlotCallback callback);
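  // A hedged sketch of such a rebuilding callback (|heap| and |store_buffer|
  // are stand-ins for the relevant objects, and the real scavenger callbacks
  // do more work than this):
  //
  //   void ExampleSlotCallback(HeapObject** from, HeapObject* to) {
  //     if (heap->InNewSpace(to)) {
  //       // |to| is still in new space, so the slot must be re-entered.
  //       store_buffer->EnterDirectlyIntoStoreBuffer(
  //           reinterpret_cast<Address>(from));
  //     }
  //   }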

  static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
  static const int kStoreBufferSize = kStoreBufferOverflowBit;
  static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
  static const int kHashSetLengthLog2 = 12;
  static const int kHashSetLength = 1 << kHashSetLengthLog2;
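  // For orientation, on a 64-bit target (kPointerSizeLog2 == 3) these work
  // out to: kStoreBufferSize = 1 << 17 bytes = 128 KB, kStoreBufferLength =
  // 128 KB / 8 = 16384 slots, kOldStoreBufferLength = 16384 * 16 = 262144
  // slots, and two hash sets of 1 << 12 = 4096 entries each.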
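  // Moves the contents of the new buffer into the old buffer, filtering out
  // duplicates (the "compression" mentioned in the private section below);
  // this description is inferred from the buffer layout comment, not from
  // store-buffer.cc.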
  void Compact();

  void GCPrologue();
  void GCEpilogue();

  Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
  Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
  Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
  void SetTop(Object*** top) {
    DCHECK(top >= Start());
    DCHECK(top <= Limit());
    old_top_ = reinterpret_cast<Address*>(top);
  }
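  // The old buffer thus behaves like a bump-pointer array: live entries
  // occupy [Start(), Top()), and Limit() - Top() slots remain free.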

  bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
  bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }

  void EnsureSpace(intptr_t space_needed);
  void Verify();

  bool PrepareForIteration();

  void Filter(int flag);

  // Eliminates all stale store buffer entries from the store buffer, i.e.,
  // slots that are not part of live objects anymore. This method must be
  // called after marking, when the whole transitive closure is known, and
  // before sweeping, while the mark bits are still intact.
  void ClearInvalidStoreBufferEntries();
  void VerifyValidStoreBufferEntries();

 private:
  Heap* heap_;

  // The store buffer is divided up into a new buffer that is constantly being
  // filled by mutator activity and an old buffer that is filled with the data
  // from the new buffer after compression.
  Address* start_;
  Address* limit_;

  Address* old_start_;
  Address* old_limit_;
  Address* old_top_;
  Address* old_reserved_limit_;
  base::VirtualMemory* old_virtual_memory_;

  bool old_buffer_is_sorted_;
  bool old_buffer_is_filtered_;
  bool during_gc_;
  // The garbage collector iterates over many pointers to new space that are
  // not handled by the store buffer.  This flag indicates whether the
  // pointers found by the callbacks should be added to the store buffer or
  // not.
  bool store_buffer_rebuilding_enabled_;
  StoreBufferCallback callback_;
  bool may_move_store_buffer_entries_;

  base::VirtualMemory* virtual_memory_;

  // Two hash sets used for filtering.
  // If an address is in the hash set then it is guaranteed to be in the
  // old part of the store buffer.
  uintptr_t* hash_set_1_;
  uintptr_t* hash_set_2_;
  bool hash_sets_are_empty_;
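  // A hypothetical sketch of the membership test these sets support (the
  // real hash functions live in store-buffer.cc and may well differ):
  //
  //   uintptr_t int_addr = reinterpret_cast<uintptr_t>(addr);
  //   size_t slot = (int_addr >> kPointerSizeLog2) & (kHashSetLength - 1);
  //   if (hash_set_1_[slot] == int_addr || hash_set_2_[slot] == int_addr) {
  //     return;  // Already recorded in the old part of the store buffer.
  //   }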

  // Used for synchronization of concurrent store buffer access.
  base::Mutex mutex_;

  void ClearFilteringHashSets();

  bool SpaceAvailable(intptr_t space_needed);
  void ExemptPopularPages(int prime_sample_step, int threshold);

  void ProcessOldToNewSlot(Address slot_address,
                           ObjectSlotCallback slot_callback);

  void FindPointersToNewSpaceInRegion(Address start, Address end,
                                      ObjectSlotCallback slot_callback);

  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);

#ifdef VERIFY_HEAP
  void VerifyPointers(LargeObjectSpace* space);
#endif

  friend class DontMoveStoreBufferEntriesScope;
  friend class FindPointersToNewSpaceVisitor;
  friend class StoreBufferRebuildScope;
};


class StoreBufferRebuilder {
 public:
  explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
      : store_buffer_(store_buffer) {}

  void Callback(MemoryChunk* page, StoreBufferEvent event);
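  // |event| is one of the StoreBufferEvent values from src/globals.h
  // (included above); kStoreBufferStartScanningPagesEvent, for example, is
  // the one fired by StoreBufferRebuildScope below.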

 private:
  StoreBuffer* store_buffer_;

  // We record in this variable how full the store buffer was when we started
  // iterating over the current page, finding pointers to new space.  If the
  // store buffer overflows again we can exempt the page from the store buffer
  // by rewinding to this point instead of having to search the store buffer.
  Object*** start_of_current_page_;
  // The current page we are scanning in the store buffer iterator.
  MemoryChunk* current_page_;
};


class StoreBufferRebuildScope {
 public:
  explicit StoreBufferRebuildScope(Heap* heap, StoreBuffer* store_buffer,
                                   StoreBufferCallback callback)
      : store_buffer_(store_buffer),
        stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
        stored_callback_(store_buffer->callback_) {
    store_buffer_->store_buffer_rebuilding_enabled_ = true;
    store_buffer_->callback_ = callback;
    (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
  }

  ~StoreBufferRebuildScope() {
    store_buffer_->callback_ = stored_callback_;
    store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
  }

 private:
  StoreBuffer* store_buffer_;
  bool stored_state_;
  StoreBufferCallback stored_callback_;
};
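
// A minimal usage sketch (hypothetical call site; ExampleRebuildCallback and
// the heap->store_buffer() accessor are assumptions, not declared here):
//
//   {
//     StoreBufferRebuildScope scope(heap, heap->store_buffer(),
//                                   &ExampleRebuildCallback);
//     heap->store_buffer()->IteratePointersToNewSpace(&ExampleSlotCallback);
//   }  // Destructor restores the previous callback and rebuild flag.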


class DontMoveStoreBufferEntriesScope {
 public:
  explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
      : store_buffer_(store_buffer),
        stored_state_(store_buffer->may_move_store_buffer_entries_) {
    store_buffer_->may_move_store_buffer_entries_ = false;
  }

  ~DontMoveStoreBufferEntriesScope() {
    store_buffer_->may_move_store_buffer_entries_ = stored_state_;
  }

 private:
  StoreBuffer* store_buffer_;
  bool stored_state_;
};
}  // namespace internal
}  // namespace v8

#endif  // V8_STORE_BUFFER_H_