// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SLOT_SET_H_
#define V8_HEAP_SLOT_SET_H_

#include <map>
#include <memory>
#include <stack>

#include "src/base/atomic-utils.h"
#include "src/base/bit-field.h"
#include "src/base/bits.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/slots.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"
#include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck

namespace v8 {
namespace internal {

enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

// Possibly empty buckets (buckets that do not contain any slots) are
// discovered by the scavenger. Buckets might become non-empty when promoting
// objects later or in another thread, so all those buckets need to be
// revisited.
// This data structure tracks the possibly empty buckets within a SlotSet. The
// class stores a word-sized bitmap inline; if more bits are needed, the bitmap
// is replaced with a pointer to a malloc-allocated bitmap.
class PossiblyEmptyBuckets {
 public:
  PossiblyEmptyBuckets() : bitmap_(kNullAddress) {}
  PossiblyEmptyBuckets(PossiblyEmptyBuckets&& other) V8_NOEXCEPT
      : bitmap_(other.bitmap_) {
    other.bitmap_ = kNullAddress;
  }

  ~PossiblyEmptyBuckets() { Release(); }

  PossiblyEmptyBuckets(const PossiblyEmptyBuckets&) = delete;
  PossiblyEmptyBuckets& operator=(const PossiblyEmptyBuckets&) = delete;

  void Initialize() {
    bitmap_ = kNullAddress;
    DCHECK(!IsAllocated());
  }

  void Release() {
    if (IsAllocated()) {
      AlignedFree(BitmapArray());
    }
    bitmap_ = kNullAddress;
    DCHECK(!IsAllocated());
  }

  void Insert(size_t bucket_index, size_t buckets) {
    if (IsAllocated()) {
      InsertAllocated(bucket_index);
    } else if (bucket_index + 1 < kBitsPerWord) {
      bitmap_ |= static_cast<uintptr_t>(1) << (bucket_index + 1);
    } else {
      Allocate(buckets);
      InsertAllocated(bucket_index);
    }
  }

  bool Contains(size_t bucket_index) {
    if (IsAllocated()) {
      size_t word_idx = bucket_index / kBitsPerWord;
      uintptr_t* word = BitmapArray() + word_idx;
      return *word &
             (static_cast<uintptr_t>(1) << (bucket_index % kBitsPerWord));
    } else if (bucket_index + 1 < kBitsPerWord) {
      return bitmap_ & (static_cast<uintptr_t>(1) << (bucket_index + 1));
    } else {
      return false;
    }
  }

  bool IsEmpty() { return bitmap_ == kNullAddress; }

 private:
  Address bitmap_;
  static const Address kPointerTag = 1;
  static const int kWordSize = sizeof(uintptr_t);
  static const int kBitsPerWord = kWordSize * kBitsPerByte;

  bool IsAllocated() { return bitmap_ & kPointerTag; }

  void Allocate(size_t buckets) {
    DCHECK(!IsAllocated());
    size_t words = WordsForBuckets(buckets);
    uintptr_t* ptr = reinterpret_cast<uintptr_t*>(
        AlignedAlloc(words * kWordSize, kSystemPointerSize));
    ptr[0] = bitmap_ >> 1;

    for (size_t word_idx = 1; word_idx < words; word_idx++) {
      ptr[word_idx] = 0;
    }
    bitmap_ = reinterpret_cast<Address>(ptr) + kPointerTag;
    DCHECK(IsAllocated());
  }

  void InsertAllocated(size_t bucket_index) {
    DCHECK(IsAllocated());
    size_t word_idx = bucket_index / kBitsPerWord;
    uintptr_t* word = BitmapArray() + word_idx;
    *word |= static_cast<uintptr_t>(1) << (bucket_index % kBitsPerWord);
  }

  static size_t WordsForBuckets(size_t buckets) {
    return (buckets + kBitsPerWord - 1) / kBitsPerWord;
  }

  uintptr_t* BitmapArray() {
    DCHECK(IsAllocated());
    return reinterpret_cast<uintptr_t*>(bitmap_ & ~kPointerTag);
  }

  FRIEND_TEST(PossiblyEmptyBucketsTest, WordsForBuckets);
};

STATIC_ASSERT(std::is_standard_layout<PossiblyEmptyBuckets>::value);
STATIC_ASSERT(sizeof(PossiblyEmptyBuckets) == kSystemPointerSize);
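
// A minimal usage sketch (illustrative only, not part of the V8 API surface):
// the inline word-sized bitmap handles the first kBitsPerWord - 1 buckets and
// Insert() transparently switches to a heap-allocated bitmap for larger
// bucket indices.
//
//   PossiblyEmptyBuckets possibly_empty;
//   possibly_empty.Initialize();
//   possibly_empty.Insert(3, /* buckets */ 128);    // stays in the inline word
//   possibly_empty.Insert(100, /* buckets */ 128);  // switches to malloc'd bitmap
//   DCHECK(possibly_empty.Contains(3));
//   DCHECK(possibly_empty.Contains(100));
//   possibly_empty.Release();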

// Data structure for maintaining a set of slots in a standard (non-large)
// page.
// The data structure assumes that the slots are pointer size aligned and
// splits the valid slot offset range into buckets.
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
class SlotSet {
 public:
  enum EmptyBucketMode {
    FREE_EMPTY_BUCKETS,  // An empty bucket will be deallocated immediately.
    KEEP_EMPTY_BUCKETS   // An empty bucket will be kept.
  };

  SlotSet() = delete;

  static SlotSet* Allocate(size_t buckets) {
    //  SlotSet* slot_set --+
    //                      |
    //                      v
    //    +-----------------+-------------------------+
    //    | initial buckets |     buckets array       |
    //    +-----------------+-------------------------+
    //       pointer-sized    pointer-sized * buckets
    //
    //
    // The SlotSet pointer points to the beginning of the buckets array for
    // faster access in the write barrier. The number of buckets is needed for
    // calculating the size of this data structure.
    size_t buckets_size = buckets * sizeof(Bucket*);
    size_t size = kInitialBucketsSize + buckets_size;
    void* allocation = AlignedAlloc(size, kSystemPointerSize);
    SlotSet* slot_set = reinterpret_cast<SlotSet*>(
        reinterpret_cast<uint8_t*>(allocation) + kInitialBucketsSize);
    DCHECK(
        IsAligned(reinterpret_cast<uintptr_t>(slot_set), kSystemPointerSize));
#ifdef DEBUG
    *slot_set->initial_buckets() = buckets;
#endif
    for (size_t i = 0; i < buckets; i++) {
      *slot_set->bucket(i) = nullptr;
    }
    return slot_set;
  }

  static void Delete(SlotSet* slot_set, size_t buckets) {
    if (slot_set == nullptr) return;

    for (size_t i = 0; i < buckets; i++) {
      slot_set->ReleaseBucket(i);
    }

#ifdef DEBUG
    size_t initial_buckets = *slot_set->initial_buckets();

    for (size_t i = buckets; i < initial_buckets; i++) {
      DCHECK_NULL(*slot_set->bucket(i));
    }
#endif

    AlignedFree(reinterpret_cast<uint8_t*>(slot_set) - kInitialBucketsSize);
  }

  static size_t BucketsForSize(size_t size) {
    return (size + (kTaggedSize * kBitsPerBucket) - 1) >>
           (kTaggedSizeLog2 + kBitsPerBucketLog2);
  }

  // Converts the slot offset into bucket index.
  static size_t BucketForSlot(size_t slot_offset) {
    DCHECK(IsAligned(slot_offset, kTaggedSize));
    return slot_offset >> (kTaggedSizeLog2 + kBitsPerBucketLog2);
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  // AccessMode defines whether there can be concurrent access on the buckets
  // or not.
  template <AccessMode access_mode>
  void Insert(size_t slot_offset) {
    size_t bucket_index;
    int cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket* bucket = LoadBucket<access_mode>(bucket_index);
    if (bucket == nullptr) {
      bucket = new Bucket;
      if (!SwapInNewBucket<access_mode>(bucket_index, bucket)) {
        delete bucket;
        bucket = LoadBucket<access_mode>(bucket_index);
      }
    }
    // Check that monotonicity is preserved, i.e., once a bucket is set we do
    // not free it concurrently.
    DCHECK(bucket != nullptr);
    DCHECK_EQ(bucket->cells(), LoadBucket<access_mode>(bucket_index)->cells());
    uint32_t mask = 1u << bit_index;
    if ((bucket->LoadCell<access_mode>(cell_index) & mask) == 0) {
      bucket->SetCellBits<access_mode>(cell_index, mask);
    }
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  // Returns true if the set contains the slot.
  bool Contains(size_t slot_offset) {
    size_t bucket_index;
    int cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket* bucket = LoadBucket(bucket_index);
    if (bucket == nullptr) return false;
    return (bucket->LoadCell(cell_index) & (1u << bit_index)) != 0;
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  void Remove(size_t slot_offset) {
    size_t bucket_index;
    int cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket* bucket = LoadBucket(bucket_index);
    if (bucket != nullptr) {
      uint32_t cell = bucket->LoadCell(cell_index);
      uint32_t bit_mask = 1u << bit_index;
      if (cell & bit_mask) {
        bucket->ClearCellBits(cell_index, bit_mask);
      }
    }
  }

  // The slot offsets specify a range of slots at addresses:
  // [page_start_ + start_offset ... page_start_ + end_offset).
  void RemoveRange(size_t start_offset, size_t end_offset, size_t buckets,
                   EmptyBucketMode mode) {
    CHECK_LE(end_offset, buckets * kBitsPerBucket * kTaggedSize);
    DCHECK_LE(start_offset, end_offset);
    size_t start_bucket;
    int start_cell, start_bit;
    SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
    size_t end_bucket;
    int end_cell, end_bit;
    SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
    uint32_t start_mask = (1u << start_bit) - 1;
    uint32_t end_mask = ~((1u << end_bit) - 1);
    Bucket* bucket;
    if (start_bucket == end_bucket && start_cell == end_cell) {
      bucket = LoadBucket(start_bucket);
      if (bucket != nullptr) {
        bucket->ClearCellBits(start_cell, ~(start_mask | end_mask));
      }
      return;
    }
    size_t current_bucket = start_bucket;
    int current_cell = start_cell;
    bucket = LoadBucket(current_bucket);
    if (bucket != nullptr) {
      bucket->ClearCellBits(current_cell, ~start_mask);
    }
    current_cell++;
    if (current_bucket < end_bucket) {
      if (bucket != nullptr) {
        ClearBucket(bucket, current_cell, kCellsPerBucket);
      }
      // The rest of the current bucket is cleared.
      // Move on to the next bucket.
      current_bucket++;
      current_cell = 0;
    }
    DCHECK(current_bucket == end_bucket ||
           (current_bucket < end_bucket && current_cell == 0));
    while (current_bucket < end_bucket) {
      if (mode == FREE_EMPTY_BUCKETS) {
        ReleaseBucket(current_bucket);
      } else {
        DCHECK(mode == KEEP_EMPTY_BUCKETS);
        bucket = LoadBucket(current_bucket);
        if (bucket != nullptr) {
          ClearBucket(bucket, 0, kCellsPerBucket);
        }
      }
      current_bucket++;
    }
    // All buckets between start_bucket and end_bucket are cleared.
    DCHECK(current_bucket == end_bucket);
    if (current_bucket == buckets) return;
    bucket = LoadBucket(current_bucket);
    DCHECK(current_cell <= end_cell);
    if (bucket == nullptr) return;
    while (current_cell < end_cell) {
      bucket->StoreCell(current_cell, 0);
      current_cell++;
    }
    // All cells between start_cell and end_cell are cleared.
    DCHECK(current_bucket == end_bucket && current_cell == end_cell);
    bucket->ClearCellBits(end_cell, ~end_mask);
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  bool Lookup(size_t slot_offset) {
    size_t bucket_index;
    int cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket* bucket = LoadBucket(bucket_index);
    if (bucket == nullptr) return false;
    return (bucket->LoadCell(cell_index) & (1u << bit_index)) != 0;
  }

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  //
  // Iteration can be performed concurrently with other operations that use
  // atomic access mode such as insertion and removal. However there is no
  // guarantee about ordering and linearizability.
  //
  // Sample usage:
  // Iterate([](MaybeObjectSlot slot) {
  //    if (good(slot)) return KEEP_SLOT;
  //    else return REMOVE_SLOT;
  // });
  //
  // Releases memory for empty buckets with FREE_EMPTY_BUCKETS.
  template <typename Callback>
  size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
                 Callback callback, EmptyBucketMode mode) {
    return Iterate(chunk_start, start_bucket, end_bucket, callback,
                   [this, mode](size_t bucket_index) {
                     if (mode == EmptyBucketMode::FREE_EMPTY_BUCKETS) {
                       ReleaseBucket(bucket_index);
                     }
                   });
  }

  // Similar to Iterate but additionally tracks potentially empty buckets:
  // every bucket found to be empty during iteration is recorded in
  // possibly_empty_buckets. Assumes that possibly_empty_buckets was already
  // cleared by CheckPossiblyEmptyBuckets.
  template <typename Callback>
  size_t IterateAndTrackEmptyBuckets(
      Address chunk_start, size_t start_bucket, size_t end_bucket,
      Callback callback, PossiblyEmptyBuckets* possibly_empty_buckets) {
    return Iterate(chunk_start, start_bucket, end_bucket, callback,
                   [possibly_empty_buckets, end_bucket](size_t bucket_index) {
                     possibly_empty_buckets->Insert(bucket_index, end_bucket);
                   });
  }

  bool FreeEmptyBuckets(size_t buckets) {
    bool empty = true;
    for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
      if (!FreeBucketIfEmpty(bucket_index)) {
        empty = false;
      }
    }

    return empty;
  }

  // Check whether possibly empty buckets are really empty. Empty buckets are
  // freed and the possibly empty state is cleared for all buckets.
  bool CheckPossiblyEmptyBuckets(size_t buckets,
                                 PossiblyEmptyBuckets* possibly_empty_buckets) {
    bool empty = true;
    for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
      Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
      if (bucket) {
        if (possibly_empty_buckets->Contains(bucket_index)) {
          if (bucket->IsEmpty()) {
            ReleaseBucket<AccessMode::NON_ATOMIC>(bucket_index);
          } else {
            empty = false;
          }
        } else {
          empty = false;
        }
      } else {
        // Unfortunately we cannot DCHECK here that the corresponding bit in
        // possibly_empty_buckets is not set. After scavenge, the
        // MergeOldToNewRememberedSets operation might remove a recorded bucket.
      }
    }

    possibly_empty_buckets->Release();

    return empty;
  }

  static const int kCellsPerBucket = 32;
  static const int kCellsPerBucketLog2 = 5;
  static const int kCellSizeBytesLog2 = 2;
  static const int kCellSizeBytes = 1 << kCellSizeBytesLog2;
  static const int kBitsPerCell = 32;
  static const int kBitsPerCellLog2 = 5;
  static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
  static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
  static const int kBucketsRegularPage =
      (1 << kPageSizeBits) / kTaggedSize / kCellsPerBucket / kBitsPerCell;
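  // For example (illustrative numbers only; the actual values depend on
  // kPageSizeBits and kTaggedSize for the current build): with 256 KiB pages
  // and kTaggedSize == 8 this is (1 << 18) / 8 / 32 / 32 = 32 buckets per
  // regular page.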

  class Bucket : public Malloced {
    uint32_t cells_[kCellsPerBucket];

   public:
    Bucket() {
      for (int i = 0; i < kCellsPerBucket; i++) {
        cells_[i] = 0;
      }
    }

    uint32_t* cells() { return cells_; }
    uint32_t* cell(int cell_index) { return cells() + cell_index; }

    template <AccessMode access_mode = AccessMode::ATOMIC>
    uint32_t LoadCell(int cell_index) {
      DCHECK_LT(cell_index, kCellsPerBucket);
      if (access_mode == AccessMode::ATOMIC)
        return base::AsAtomic32::Acquire_Load(cells() + cell_index);
      return *(cells() + cell_index);
    }

    template <AccessMode access_mode = AccessMode::ATOMIC>
    void SetCellBits(int cell_index, uint32_t mask) {
      if (access_mode == AccessMode::ATOMIC) {
        base::AsAtomic32::SetBits(cell(cell_index), mask, mask);
      } else {
        uint32_t* c = cell(cell_index);
        *c = (*c & ~mask) | mask;
      }
    }

    void ClearCellBits(int cell_index, uint32_t mask) {
      base::AsAtomic32::SetBits(cell(cell_index), 0u, mask);
    }

    void StoreCell(int cell_index, uint32_t value) {
      base::AsAtomic32::Release_Store(cell(cell_index), value);
    }

    bool IsEmpty() {
      for (int i = 0; i < kCellsPerBucket; i++) {
        if (cells_[i] != 0) {
          return false;
        }
      }
      return true;
    }
  };

 private:
  template <typename Callback, typename EmptyBucketCallback>
  size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
                 Callback callback, EmptyBucketCallback empty_bucket_callback) {
    size_t new_count = 0;
    for (size_t bucket_index = start_bucket; bucket_index < end_bucket;
         bucket_index++) {
      Bucket* bucket = LoadBucket(bucket_index);
      if (bucket != nullptr) {
        size_t in_bucket_count = 0;
        size_t cell_offset = bucket_index << kBitsPerBucketLog2;
        for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
          uint32_t cell = bucket->LoadCell(i);
          if (cell) {
            uint32_t old_cell = cell;
            uint32_t mask = 0;
            while (cell) {
              int bit_offset = base::bits::CountTrailingZeros(cell);
              uint32_t bit_mask = 1u << bit_offset;
              Address slot = (cell_offset + bit_offset) << kTaggedSizeLog2;
              if (callback(MaybeObjectSlot(chunk_start + slot)) == KEEP_SLOT) {
                ++in_bucket_count;
              } else {
                mask |= bit_mask;
              }
              cell ^= bit_mask;
            }
            uint32_t new_cell = old_cell & ~mask;
            if (old_cell != new_cell) {
              bucket->ClearCellBits(i, mask);
            }
          }
        }
        if (in_bucket_count == 0) {
          empty_bucket_callback(bucket_index);
        }
        new_count += in_bucket_count;
      }
    }
    return new_count;
  }

  bool FreeBucketIfEmpty(size_t bucket_index) {
    Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
    if (bucket != nullptr) {
      if (bucket->IsEmpty()) {
        ReleaseBucket<AccessMode::NON_ATOMIC>(bucket_index);
      } else {
        return false;
      }
    }

    return true;
  }

  void ClearBucket(Bucket* bucket, int start_cell, int end_cell) {
    DCHECK_GE(start_cell, 0);
    DCHECK_LE(end_cell, kCellsPerBucket);
    int current_cell = start_cell;
    while (current_cell < kCellsPerBucket) {
      bucket->StoreCell(current_cell, 0);
      current_cell++;
    }
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  void ReleaseBucket(size_t bucket_index) {
    Bucket* bucket = LoadBucket<access_mode>(bucket_index);
    StoreBucket<access_mode>(bucket_index, nullptr);
    delete bucket;
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  Bucket* LoadBucket(Bucket** bucket) {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomicPointer::Acquire_Load(bucket);
    return *bucket;
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  Bucket* LoadBucket(size_t bucket_index) {
    return LoadBucket(bucket(bucket_index));
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  void StoreBucket(Bucket** bucket, Bucket* value) {
    if (access_mode == AccessMode::ATOMIC) {
      base::AsAtomicPointer::Release_Store(bucket, value);
    } else {
      *bucket = value;
    }
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  void StoreBucket(size_t bucket_index, Bucket* value) {
    StoreBucket(bucket(bucket_index), value);
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  bool SwapInNewBucket(size_t bucket_index, Bucket* value) {
    Bucket** b = bucket(bucket_index);
    if (access_mode == AccessMode::ATOMIC) {
      return base::AsAtomicPointer::Release_CompareAndSwap(b, nullptr, value) ==
             nullptr;
    } else {
      DCHECK_NULL(*b);
      *b = value;
      return true;
    }
  }

  // Converts the slot offset into bucket/cell/bit index.
  static void SlotToIndices(size_t slot_offset, size_t* bucket_index,
                            int* cell_index, int* bit_index) {
    DCHECK(IsAligned(slot_offset, kTaggedSize));
    size_t slot = slot_offset >> kTaggedSizeLog2;
    *bucket_index = slot >> kBitsPerBucketLog2;
    *cell_index =
        static_cast<int>((slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1));
    *bit_index = static_cast<int>(slot & (kBitsPerCell - 1));
  }
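
  // Worked example (with kTaggedSize == 8, i.e. kTaggedSizeLog2 == 3; the
  // exact numbers depend on the build): slot_offset == 0x2528 refers to the
  // 0x2528 >> 3 == 1189th tagged slot of the page, which decomposes into
  // bucket_index = 1189 >> 10 = 1, cell_index = (1189 >> 5) & 31 = 5, and
  // bit_index = 1189 & 31 = 5.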

  Bucket** buckets() { return reinterpret_cast<Bucket**>(this); }
  Bucket** bucket(size_t bucket_index) { return buckets() + bucket_index; }

#ifdef DEBUG
  size_t* initial_buckets() { return reinterpret_cast<size_t*>(this) - 1; }
  static const int kInitialBucketsSize = sizeof(size_t);
#else
  static const int kInitialBucketsSize = 0;
#endif
};

STATIC_ASSERT(std::is_standard_layout<SlotSet>::value);
STATIC_ASSERT(std::is_standard_layout<SlotSet::Bucket>::value);
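
// End-to-end usage sketch (illustrative only; page_size, page_start and
// slot_offset are made-up placeholders for the example):
//
//   size_t buckets = SlotSet::BucketsForSize(page_size);
//   SlotSet* slots = SlotSet::Allocate(buckets);
//   slots->Insert<AccessMode::ATOMIC>(slot_offset);
//   if (slots->Lookup(slot_offset)) { /* slot is recorded */ }
//   slots->Iterate(
//       page_start, 0, buckets,
//       [](MaybeObjectSlot slot) { return KEEP_SLOT; },
//       SlotSet::FREE_EMPTY_BUCKETS);
//   SlotSet::Delete(slots, buckets);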

enum class SlotType : uint8_t {
  // Full pointer sized slot storing an object start address.
  // RelocInfo::target_object/RelocInfo::set_target_object methods are used for
  // accessing. Used when pointer is stored in the instruction stream.
  kEmbeddedObjectFull,

  // Tagged sized slot storing an object start address.
  // RelocInfo::target_object/RelocInfo::set_target_object methods are used for
  // accessing. Used when pointer is stored in the instruction stream.
  kEmbeddedObjectCompressed,

  // Full pointer sized slot storing an object start address.
  // RelocInfo::target_object/RelocInfo::set_target_object methods are used for
  // accessing. Used when pointer is stored in the instruction stream.
  kEmbeddedObjectData,

  // Full pointer sized slot storing instruction start of Code object.
  // RelocInfo::target_address/RelocInfo::set_target_address methods are used
  // for accessing. Used when pointer is stored in the instruction stream.
  kCodeEntry,

  // Raw full pointer sized slot. Slot is accessed directly. Used when pointer
  // is stored in constant pool.
  kConstPoolEmbeddedObjectFull,

  // Raw tagged sized slot. Slot is accessed directly. Used when pointer is
  // stored in constant pool.
  kConstPoolEmbeddedObjectCompressed,

  // Raw full pointer sized slot storing instruction start of Code object. Slot
  // is accessed directly. Used when pointer is stored in constant pool.
  kConstPoolCodeEntry,

  // Slot got cleared but has not been removed from the slot set.
  kCleared,

  kLast = kCleared
};

// Data structure for maintaining a list of typed slots in a page.
// Typed slots can only appear in Code objects, so
// the maximum possible offset is limited by the LargePage::kMaxCodePageSize.
// The implementation is a chain of chunks, where each chunk is an array of
// encoded (slot type, slot offset) pairs.
// There is no duplicate detection and we do not expect many duplicates because
// typed slots contain V8 internal pointers that are not directly exposed to JS.
class V8_EXPORT_PRIVATE TypedSlots {
 public:
  static const int kMaxOffset = 1 << 29;
  TypedSlots() = default;
  virtual ~TypedSlots();
  void Insert(SlotType type, uint32_t offset);
  void Merge(TypedSlots* other);

 protected:
  using OffsetField = base::BitField<int, 0, 29>;
  using TypeField = base::BitField<SlotType, 29, 3>;
  struct TypedSlot {
    uint32_t type_and_offset;
  };
  struct Chunk {
    Chunk* next;
    std::vector<TypedSlot> buffer;
  };
  static const size_t kInitialBufferSize = 100;
  static const size_t kMaxBufferSize = 16 * KB;
  static size_t NextCapacity(size_t capacity) {
    return std::min({kMaxBufferSize, capacity * 2});
  }
  Chunk* EnsureChunk();
  Chunk* NewChunk(Chunk* next, size_t capacity);
  Chunk* head_ = nullptr;
  Chunk* tail_ = nullptr;
};
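
// Each TypedSlot packs the slot type and the in-page offset into a single
// 32-bit word: the low 29 bits hold the offset (hence kMaxOffset == 1 << 29)
// and the top 3 bits hold the SlotType. Illustrative encode/decode sketch:
//
//   uint32_t encoded = TypeField::encode(SlotType::kCodeEntry) |
//                      OffsetField::encode(0x1234);
//   SlotType type = TypeField::decode(encoded);  // SlotType::kCodeEntry
//   int offset = OffsetField::decode(encoded);   // 0x1234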

// A multiset of per-page typed slots that allows concurrent iteration and
// clearing of invalid slots.
class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
 public:
  using FreeRangesMap = std::map<uint32_t, uint32_t>;

  enum IterationMode { FREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };

  explicit TypedSlotSet(Address page_start) : page_start_(page_start) {}

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  //
  // Sample usage:
  // Iterate([](SlotType slot_type, Address slot_address) {
  //    if (good(slot_type, slot_address)) return KEEP_SLOT;
  //    else return REMOVE_SLOT;
  // });
  // This can run concurrently to ClearInvalidSlots().
  template <typename Callback>
  int Iterate(Callback callback, IterationMode mode) {
    STATIC_ASSERT(static_cast<uint8_t>(SlotType::kLast) < 8);
    Chunk* chunk = head_;
    Chunk* previous = nullptr;
    int new_count = 0;
    while (chunk != nullptr) {
      bool empty = true;
      for (TypedSlot& slot : chunk->buffer) {
        SlotType type = TypeField::decode(slot.type_and_offset);
        if (type != SlotType::kCleared) {
          uint32_t offset = OffsetField::decode(slot.type_and_offset);
          Address addr = page_start_ + offset;
          if (callback(type, addr) == KEEP_SLOT) {
            new_count++;
            empty = false;
          } else {
            slot = ClearedTypedSlot();
          }
        }
      }
      Chunk* next = chunk->next;
      if (mode == FREE_EMPTY_CHUNKS && empty) {
        // We remove the chunk from the list but let it still point to its
        // next chunk to allow concurrent iteration.
        if (previous) {
          StoreNext(previous, next);
        } else {
          StoreHead(next);
        }

        delete chunk;
      } else {
        previous = chunk;
      }
      chunk = next;
    }
    return new_count;
  }

  // Clears all slots whose offsets fall in the specified ranges.
  // This can run concurrently to Iterate().
  void ClearInvalidSlots(const FreeRangesMap& invalid_ranges);

  // Asserts that there are no recorded slots in the specified ranges.
  void AssertNoInvalidSlots(const FreeRangesMap& invalid_ranges);

  // Frees empty chunks accumulated by PREFREE_EMPTY_CHUNKS.
  void FreeToBeFreedChunks();

 private:
  template <typename Callback>
  void IterateSlotsInRanges(Callback callback,
                            const FreeRangesMap& invalid_ranges);

  // Atomic operations used by Iterate and ClearInvalidSlots.
  Chunk* LoadNext(Chunk* chunk) {
    return base::AsAtomicPointer::Relaxed_Load(&chunk->next);
  }
  void StoreNext(Chunk* chunk, Chunk* next) {
    return base::AsAtomicPointer::Relaxed_Store(&chunk->next, next);
  }
  Chunk* LoadHead() { return base::AsAtomicPointer::Relaxed_Load(&head_); }
  void StoreHead(Chunk* chunk) {
    base::AsAtomicPointer::Relaxed_Store(&head_, chunk);
  }
  static TypedSlot ClearedTypedSlot() {
    return TypedSlot{TypeField::encode(SlotType::kCleared) |
                     OffsetField::encode(0)};
  }

  Address page_start_;
};
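
// Illustrative sketch of clearing slots in freed ranges (assuming, as the map
// type suggests, that each entry maps a range's start offset to its end
// offset, both as in-page offsets):
//
//   TypedSlotSet::FreeRangesMap invalid_ranges;
//   invalid_ranges[start_offset] = end_offset;
//   typed_slot_set->ClearInvalidSlots(invalid_ranges);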

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SLOT_SET_H_