// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SLOT_SET_H_
#define V8_HEAP_SLOT_SET_H_

#include <map>
#include <memory>
#include <stack>

#include "src/base/atomic-utils.h"
#include "src/base/bit-field.h"
#include "src/base/bits.h"
#include "src/heap/worklist.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/slots.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

// Possibly empty buckets (buckets that do not contain any slots) are
// discovered by the scavenger. Buckets might become non-empty when promoting
// objects later or in another thread, so all those buckets need to be
// revisited.
// This data structure tracks the possibly empty buckets of a SlotSet. It
// contains a word-sized bitmap; if more bits are needed, the bitmap is
// replaced with a pointer to a malloc-allocated bitmap.
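//
// A minimal usage sketch (the names below are illustrative, not taken from
// call sites):
//   PossiblyEmptyBuckets possibly_empty;
//   possibly_empty.Initialize();
//   possibly_empty.Insert(bucket_index, buckets);
//   bool maybe_empty = possibly_empty.Contains(bucket_index);
//   possibly_empty.Release();  // Frees the out-of-line bitmap, if any.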
class PossiblyEmptyBuckets {
 public:
  PossiblyEmptyBuckets() : bitmap_(kNullAddress) {}
  PossiblyEmptyBuckets(PossiblyEmptyBuckets&& other) V8_NOEXCEPT
      : bitmap_(other.bitmap_) {
    other.bitmap_ = kNullAddress;
  }

  ~PossiblyEmptyBuckets() { Release(); }

  void Initialize() {
    bitmap_ = kNullAddress;
    DCHECK(!IsAllocated());
  }

  void Release() {
    if (IsAllocated()) {
      AlignedFree(BitmapArray());
    }
    bitmap_ = kNullAddress;
    DCHECK(!IsAllocated());
  }

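  // Marks bucket_index as possibly empty. The low bit of bitmap_ acts as a
  // tag: while it is clear, the remaining bits form an inline bitmap (bucket
  // bits are shifted up by one); once a bucket index no longer fits, the bits
  // move to a malloc-allocated array and bitmap_ becomes a tagged pointer to
  // that array.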
  void Insert(size_t bucket_index, size_t buckets) {
    if (IsAllocated()) {
      InsertAllocated(bucket_index);
    } else if (bucket_index + 1 < kBitsPerWord) {
      bitmap_ |= static_cast<uintptr_t>(1) << (bucket_index + 1);
    } else {
      Allocate(buckets);
      InsertAllocated(bucket_index);
    }
  }

  bool Contains(size_t bucket_index) {
    if (IsAllocated()) {
      size_t word_idx = bucket_index / kBitsPerWord;
      uintptr_t* word = BitmapArray() + word_idx;
      return *word &
             (static_cast<uintptr_t>(1) << (bucket_index % kBitsPerWord));
    } else if (bucket_index + 1 < kBitsPerWord) {
      return bitmap_ & (static_cast<uintptr_t>(1) << (bucket_index + 1));
    } else {
      return false;
    }
  }

  bool IsEmpty() { return bitmap_ == kNullAddress; }

 private:
  Address bitmap_;
  static const Address kPointerTag = 1;
  static const int kWordSize = sizeof(uintptr_t);
  static const int kBitsPerWord = kWordSize * kBitsPerByte;

  bool IsAllocated() { return bitmap_ & kPointerTag; }

  void Allocate(size_t buckets) {
    DCHECK(!IsAllocated());
    size_t words = WordsForBuckets(buckets);
    uintptr_t* ptr = reinterpret_cast<uintptr_t*>(
        AlignedAlloc(words * kWordSize, kSystemPointerSize));
    ptr[0] = bitmap_ >> 1;

    for (size_t word_idx = 1; word_idx < words; word_idx++) {
      ptr[word_idx] = 0;
    }
    bitmap_ = reinterpret_cast<Address>(ptr) + kPointerTag;
    DCHECK(IsAllocated());
  }

  void InsertAllocated(size_t bucket_index) {
    DCHECK(IsAllocated());
    size_t word_idx = bucket_index / kBitsPerWord;
    uintptr_t* word = BitmapArray() + word_idx;
    *word |= static_cast<uintptr_t>(1) << (bucket_index % kBitsPerWord);
  }

  static size_t WordsForBuckets(size_t buckets) {
    return (buckets + kBitsPerWord - 1) / kBitsPerWord;
  }

  uintptr_t* BitmapArray() {
    DCHECK(IsAllocated());
    return reinterpret_cast<uintptr_t*>(bitmap_ & ~kPointerTag);
  }

  FRIEND_TEST(PossiblyEmptyBucketsTest, WordsForBuckets);

  DISALLOW_COPY_AND_ASSIGN(PossiblyEmptyBuckets);
};

STATIC_ASSERT(std::is_standard_layout<PossiblyEmptyBuckets>::value);
STATIC_ASSERT(sizeof(PossiblyEmptyBuckets) == kSystemPointerSize);

// Data structure for maintaining a set of slots in a standard (non-large)
// page.
// The data structure assumes that the slots are pointer-size aligned and
// splits the valid slot offset range into buckets.
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
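//
// A minimal usage sketch (names are illustrative; offsets must be
// kTaggedSize-aligned and relative to the page start):
//   size_t buckets = SlotSet::BucketsForSize(page_size);
//   SlotSet* slots = SlotSet::Allocate(buckets);
//   slots->Insert<AccessMode::ATOMIC>(slot_offset);
//   bool present = slots->Lookup(slot_offset);
//   SlotSet::Delete(slots, buckets);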
class SlotSet {
 public:
  enum EmptyBucketMode {
    FREE_EMPTY_BUCKETS,  // An empty bucket will be deallocated immediately.
    KEEP_EMPTY_BUCKETS   // An empty bucket will be kept.
  };

  SlotSet() = delete;

  static SlotSet* Allocate(size_t buckets) {
    //  SlotSet* slot_set --+
    //                      |
    //                      v
    //    +-----------------+-------------------------+
    //    | initial buckets |     buckets array       |
    //    +-----------------+-------------------------+
    //       pointer-sized    pointer-sized * buckets
    //
    //
    // The SlotSet pointer points to the beginning of the buckets array for
    // faster access in the write barrier. The number of buckets is needed for
    // calculating the size of this data structure.
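    //
    // For example, with 32 buckets on a 64-bit release build (an assumption
    // for illustration), buckets_size is 32 * 8 = 256 bytes and
    // kInitialBucketsSize is 0; debug builds prepend one extra word that
    // records the bucket count.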
    size_t buckets_size = buckets * sizeof(Bucket*);
    size_t size = kInitialBucketsSize + buckets_size;
    void* allocation = AlignedAlloc(size, kSystemPointerSize);
    SlotSet* slot_set = reinterpret_cast<SlotSet*>(
        reinterpret_cast<uint8_t*>(allocation) + kInitialBucketsSize);
    DCHECK(
        IsAligned(reinterpret_cast<uintptr_t>(slot_set), kSystemPointerSize));
#ifdef DEBUG
    *slot_set->initial_buckets() = buckets;
#endif
    for (size_t i = 0; i < buckets; i++) {
      *slot_set->bucket(i) = nullptr;
    }
    return slot_set;
  }

  static void Delete(SlotSet* slot_set, size_t buckets) {
    if (slot_set == nullptr) return;

    for (size_t i = 0; i < buckets; i++) {
      slot_set->ReleaseBucket(i);
    }

#ifdef DEBUG
    size_t initial_buckets = *slot_set->initial_buckets();

    for (size_t i = buckets; i < initial_buckets; i++) {
      DCHECK_NULL(*slot_set->bucket(i));
    }
#endif

    AlignedFree(reinterpret_cast<uint8_t*>(slot_set) - kInitialBucketsSize);
  }

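  // Computes the number of buckets needed to cover `size` bytes, rounding up
  // to a whole bucket. For example, assuming kTaggedSize == 8 and
  // kBitsPerBucket == 1024 (64-bit values used only for illustration), a
  // 256 KB page covers 32768 slots and therefore needs 32 buckets.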
  static size_t BucketsForSize(size_t size) {
    return (size + (kTaggedSize * kBitsPerBucket) - 1) >>
           (kTaggedSizeLog2 + kBitsPerBucketLog2);
  }

  // Converts the slot offset into bucket index.
  static size_t BucketForSlot(size_t slot_offset) {
    DCHECK(IsAligned(slot_offset, kTaggedSize));
    return slot_offset >> (kTaggedSizeLog2 + kBitsPerBucketLog2);
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  // AccessMode defines whether there can be concurrent access on the buckets
  // or not.
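  //
  // A minimal sketch of an insertion, e.g. from a write barrier (slot_addr
  // and page_start are illustrative names):
  //   slot_set->Insert<AccessMode::ATOMIC>(
  //       static_cast<size_t>(slot_addr - page_start));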
  template <AccessMode access_mode>
  void Insert(size_t slot_offset) {
    size_t bucket_index;
    int cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket* bucket = LoadBucket<access_mode>(bucket_index);
    if (bucket == nullptr) {
      bucket = new Bucket;
      if (!SwapInNewBucket<access_mode>(bucket_index, bucket)) {
        delete bucket;
        bucket = LoadBucket<access_mode>(bucket_index);
      }
    }
    // Check that monotonicity is preserved, i.e., once a bucket is set we do
    // not free it concurrently.
    DCHECK(bucket != nullptr);
    DCHECK_EQ(bucket->cells(), LoadBucket<access_mode>(bucket_index)->cells());
    uint32_t mask = 1u << bit_index;
    if ((bucket->LoadCell<access_mode>(cell_index) & mask) == 0) {
      bucket->SetCellBits<access_mode>(cell_index, mask);
    }
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  // Returns true if the set contains the slot.
  bool Contains(size_t slot_offset) {
    size_t bucket_index;
    int cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket* bucket = LoadBucket(bucket_index);
    if (bucket == nullptr) return false;
    return (bucket->LoadCell(cell_index) & (1u << bit_index)) != 0;
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  void Remove(size_t slot_offset) {
    size_t bucket_index;
    int cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket* bucket = LoadBucket(bucket_index);
    if (bucket != nullptr) {
      uint32_t cell = bucket->LoadCell(cell_index);
      uint32_t bit_mask = 1u << bit_index;
      if (cell & bit_mask) {
        bucket->ClearCellBits(cell_index, bit_mask);
      }
    }
  }

  // The slot offsets specify a range of slots at addresses:
  // [page_start_ + start_offset ... page_start_ + end_offset).
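  // For example, assuming kTaggedSize == 8, RemoveRange(0, 64, buckets,
  // KEEP_EMPTY_BUCKETS) clears the bits for the first eight slots of the page.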
  void RemoveRange(size_t start_offset, size_t end_offset, size_t buckets,
                   EmptyBucketMode mode) {
    CHECK_LE(end_offset, buckets * kBitsPerBucket * kTaggedSize);
    DCHECK_LE(start_offset, end_offset);
    size_t start_bucket;
    int start_cell, start_bit;
    SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
    size_t end_bucket;
    int end_cell, end_bit;
    SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
    uint32_t start_mask = (1u << start_bit) - 1;
    uint32_t end_mask = ~((1u << end_bit) - 1);
    Bucket* bucket;
    if (start_bucket == end_bucket && start_cell == end_cell) {
      bucket = LoadBucket(start_bucket);
      if (bucket != nullptr) {
        bucket->ClearCellBits(start_cell, ~(start_mask | end_mask));
      }
      return;
    }
    size_t current_bucket = start_bucket;
    int current_cell = start_cell;
    bucket = LoadBucket(current_bucket);
    if (bucket != nullptr) {
      bucket->ClearCellBits(current_cell, ~start_mask);
    }
    current_cell++;
    if (current_bucket < end_bucket) {
      if (bucket != nullptr) {
        ClearBucket(bucket, current_cell, kCellsPerBucket);
      }
      // The rest of the current bucket is cleared.
      // Move on to the next bucket.
      current_bucket++;
      current_cell = 0;
    }
    DCHECK(current_bucket == end_bucket ||
           (current_bucket < end_bucket && current_cell == 0));
    while (current_bucket < end_bucket) {
      if (mode == FREE_EMPTY_BUCKETS) {
        ReleaseBucket(current_bucket);
      } else {
        DCHECK(mode == KEEP_EMPTY_BUCKETS);
        bucket = LoadBucket(current_bucket);
        if (bucket != nullptr) {
          ClearBucket(bucket, 0, kCellsPerBucket);
        }
      }
      current_bucket++;
    }
    // All buckets between start_bucket and end_bucket are cleared.
    DCHECK(current_bucket == end_bucket);
    if (current_bucket == buckets) return;
    bucket = LoadBucket(current_bucket);
    DCHECK(current_cell <= end_cell);
    if (bucket == nullptr) return;
    while (current_cell < end_cell) {
      bucket->StoreCell(current_cell, 0);
      current_cell++;
    }
    // All cells between start_cell and end_cell are cleared.
    DCHECK(current_bucket == end_bucket && current_cell == end_cell);
    bucket->ClearCellBits(end_cell, ~end_mask);
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  bool Lookup(size_t slot_offset) {
    size_t bucket_index;
    int cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket* bucket = LoadBucket(bucket_index);
    if (bucket == nullptr) return false;
    return (bucket->LoadCell(cell_index) & (1u << bit_index)) != 0;
  }

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  //
  // Iteration can be performed concurrently with other operations that use
  // atomic access mode, such as insertion and removal. However, there is no
  // guarantee about ordering or linearizability.
  //
  // Sample usage:
  // Iterate([](MaybeObjectSlot slot) {
  //    if (good(slot)) return KEEP_SLOT;
  //    else return REMOVE_SLOT;
  // });
  //
  // Releases memory for empty buckets with FREE_EMPTY_BUCKETS.
  template <typename Callback>
  size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
                 Callback callback, EmptyBucketMode mode) {
    return Iterate(chunk_start, start_bucket, end_bucket, callback,
                   [this, mode](size_t bucket_index) {
                     if (mode == EmptyBucketMode::FREE_EMPTY_BUCKETS) {
                       ReleaseBucket(bucket_index);
                     }
                   });
  }

  // Similar to Iterate but additionally tracks potentially empty buckets:
  // each bucket found empty during iteration is recorded in the given
  // PossiblyEmptyBuckets set. Assumes that the possibly-empty set was already
  // cleared by CheckPossiblyEmptyBuckets.
  template <typename Callback>
  size_t IterateAndTrackEmptyBuckets(
      Address chunk_start, size_t start_bucket, size_t end_bucket,
      Callback callback, PossiblyEmptyBuckets* possibly_empty_buckets) {
    return Iterate(chunk_start, start_bucket, end_bucket, callback,
                   [possibly_empty_buckets, end_bucket](size_t bucket_index) {
                     possibly_empty_buckets->Insert(bucket_index, end_bucket);
                   });
  }

  bool FreeEmptyBuckets(size_t buckets) {
    bool empty = true;
    for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
      if (!FreeBucketIfEmpty(bucket_index)) {
        empty = false;
      }
    }

    return empty;
  }

  // Check whether possibly empty buckets are really empty. Empty buckets are
  // freed and the possibly empty state is cleared for all buckets.
  bool CheckPossiblyEmptyBuckets(size_t buckets,
                                 PossiblyEmptyBuckets* possibly_empty_buckets) {
    bool empty = true;
    for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
      Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
      if (bucket) {
        if (possibly_empty_buckets->Contains(bucket_index)) {
          if (bucket->IsEmpty()) {
            ReleaseBucket<AccessMode::NON_ATOMIC>(bucket_index);
          } else {
            empty = false;
          }
        } else {
          empty = false;
        }
      } else {
        // Unfortunately we cannot DCHECK here that the corresponding bit in
        // possibly_empty_buckets is not set. After scavenge, the
        // MergeOldToNewRememberedSets operation might remove a recorded bucket.
      }
    }

    possibly_empty_buckets->Release();

    return empty;
  }

  static const int kCellsPerBucket = 32;
  static const int kCellsPerBucketLog2 = 5;
  static const int kCellSizeBytesLog2 = 2;
  static const int kCellSizeBytes = 1 << kCellSizeBytesLog2;
  static const int kBitsPerCell = 32;
  static const int kBitsPerCellLog2 = 5;
  static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
  static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
  static const int kBucketsRegularPage =
      (1 << kPageSizeBits) / kTaggedSize / kCellsPerBucket / kBitsPerCell;

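  // A bucket is a fixed-size bitmap of kCellsPerBucket 32-bit cells; each bit
  // marks one tagged-size-aligned slot offset within the bucket's range.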
  class Bucket : public Malloced {
    uint32_t cells_[kCellsPerBucket];

   public:
    Bucket() {
      for (int i = 0; i < kCellsPerBucket; i++) {
        cells_[i] = 0;
      }
    }

    uint32_t* cells() { return cells_; }
    uint32_t* cell(int cell_index) { return cells() + cell_index; }

    template <AccessMode access_mode = AccessMode::ATOMIC>
    uint32_t LoadCell(int cell_index) {
      DCHECK_LT(cell_index, kCellsPerBucket);
      if (access_mode == AccessMode::ATOMIC)
        return base::AsAtomic32::Acquire_Load(cells() + cell_index);
      return *(cells() + cell_index);
    }

    template <AccessMode access_mode = AccessMode::ATOMIC>
    void SetCellBits(int cell_index, uint32_t mask) {
      if (access_mode == AccessMode::ATOMIC) {
        base::AsAtomic32::SetBits(cell(cell_index), mask, mask);
      } else {
        uint32_t* c = cell(cell_index);
        *c = (*c & ~mask) | mask;
      }
    }

    void ClearCellBits(int cell_index, uint32_t mask) {
      base::AsAtomic32::SetBits(cell(cell_index), 0u, mask);
    }

    void StoreCell(int cell_index, uint32_t value) {
      base::AsAtomic32::Release_Store(cell(cell_index), value);
    }

    bool IsEmpty() {
      for (int i = 0; i < kCellsPerBucket; i++) {
        if (cells_[i] != 0) {
          return false;
        }
      }
      return true;
    }
  };

 private:
  template <typename Callback, typename EmptyBucketCallback>
  size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
                 Callback callback, EmptyBucketCallback empty_bucket_callback) {
    size_t new_count = 0;
    for (size_t bucket_index = start_bucket; bucket_index < end_bucket;
         bucket_index++) {
      Bucket* bucket = LoadBucket(bucket_index);
      if (bucket != nullptr) {
        size_t in_bucket_count = 0;
        size_t cell_offset = bucket_index << kBitsPerBucketLog2;
        for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
          uint32_t cell = bucket->LoadCell(i);
          if (cell) {
            uint32_t old_cell = cell;
            uint32_t mask = 0;
            while (cell) {
              int bit_offset = base::bits::CountTrailingZeros(cell);
              uint32_t bit_mask = 1u << bit_offset;
              Address slot = (cell_offset + bit_offset) << kTaggedSizeLog2;
              if (callback(MaybeObjectSlot(chunk_start + slot)) == KEEP_SLOT) {
                ++in_bucket_count;
              } else {
                mask |= bit_mask;
              }
              cell ^= bit_mask;
            }
            uint32_t new_cell = old_cell & ~mask;
            if (old_cell != new_cell) {
              bucket->ClearCellBits(i, mask);
            }
          }
        }
        if (in_bucket_count == 0) {
          empty_bucket_callback(bucket_index);
        }
        new_count += in_bucket_count;
      }
    }
    return new_count;
  }

  bool FreeBucketIfEmpty(size_t bucket_index) {
    Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
    if (bucket != nullptr) {
      if (bucket->IsEmpty()) {
        ReleaseBucket<AccessMode::NON_ATOMIC>(bucket_index);
      } else {
        return false;
      }
    }

    return true;
  }

  void ClearBucket(Bucket* bucket, int start_cell, int end_cell) {
    DCHECK_GE(start_cell, 0);
    DCHECK_LE(end_cell, kCellsPerBucket);
    int current_cell = start_cell;
    while (current_cell < end_cell) {
      bucket->StoreCell(current_cell, 0);
      current_cell++;
    }
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  void ReleaseBucket(size_t bucket_index) {
    Bucket* bucket = LoadBucket<access_mode>(bucket_index);
    StoreBucket<access_mode>(bucket_index, nullptr);
    delete bucket;
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  Bucket* LoadBucket(Bucket** bucket) {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomicPointer::Acquire_Load(bucket);
    return *bucket;
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  Bucket* LoadBucket(size_t bucket_index) {
    return LoadBucket(bucket(bucket_index));
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  void StoreBucket(Bucket** bucket, Bucket* value) {
    if (access_mode == AccessMode::ATOMIC) {
      base::AsAtomicPointer::Release_Store(bucket, value);
    } else {
      *bucket = value;
    }
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  void StoreBucket(size_t bucket_index, Bucket* value) {
    StoreBucket(bucket(bucket_index), value);
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  bool SwapInNewBucket(size_t bucket_index, Bucket* value) {
    Bucket** b = bucket(bucket_index);
    if (access_mode == AccessMode::ATOMIC) {
      return base::AsAtomicPointer::Release_CompareAndSwap(b, nullptr, value) ==
             nullptr;
    } else {
      DCHECK_NULL(*b);
      *b = value;
      return true;
    }
  }

  // Converts the slot offset into bucket/cell/bit index.
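  // For example, assuming kTaggedSize == 8, slot_offset == 4096 corresponds
  // to slot index 512, i.e. bucket 0, cell 16, bit 0.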
  static void SlotToIndices(size_t slot_offset, size_t* bucket_index,
                            int* cell_index, int* bit_index) {
    DCHECK(IsAligned(slot_offset, kTaggedSize));
    size_t slot = slot_offset >> kTaggedSizeLog2;
    *bucket_index = slot >> kBitsPerBucketLog2;
    *cell_index =
        static_cast<int>((slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1));
    *bit_index = static_cast<int>(slot & (kBitsPerCell - 1));
  }

  Bucket** buckets() { return reinterpret_cast<Bucket**>(this); }
  Bucket** bucket(size_t bucket_index) { return buckets() + bucket_index; }

#ifdef DEBUG
  size_t* initial_buckets() { return reinterpret_cast<size_t*>(this) - 1; }
  static const int kInitialBucketsSize = sizeof(size_t);
#else
  static const int kInitialBucketsSize = 0;
#endif
};

STATIC_ASSERT(std::is_standard_layout<SlotSet>::value);
STATIC_ASSERT(std::is_standard_layout<SlotSet::Bucket>::value);

enum SlotType {
  FULL_EMBEDDED_OBJECT_SLOT,
  COMPRESSED_EMBEDDED_OBJECT_SLOT,
  FULL_OBJECT_SLOT,
  COMPRESSED_OBJECT_SLOT,
  CODE_TARGET_SLOT,
  CODE_ENTRY_SLOT,
  CLEARED_SLOT
};

// Data structure for maintaining a list of typed slots in a page.
// Typed slots can only appear in Code objects, so
// the maximum possible offset is limited by LargePage::kMaxCodePageSize.
// The implementation is a chain of chunks, where each chunk is an array of
// encoded (slot type, slot offset) pairs.
// There is no duplicate detection and we do not expect many duplicates because
// typed slots contain V8 internal pointers that are not directly exposed to JS.
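//
// A slot is packed into a single 32-bit word via the bitfields below, e.g.
// (illustrative sketch):
//   TypedSlot slot{TypeField::encode(CODE_TARGET_SLOT) |
//                  OffsetField::encode(offset)};
// with the offset occupying bits 0..28 and the type bits 29..31.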
class V8_EXPORT_PRIVATE TypedSlots {
 public:
  static const int kMaxOffset = 1 << 29;
  TypedSlots() = default;
  virtual ~TypedSlots();
  void Insert(SlotType type, uint32_t offset);
  void Merge(TypedSlots* other);

 protected:
  using OffsetField = base::BitField<int, 0, 29>;
  using TypeField = base::BitField<SlotType, 29, 3>;
  struct TypedSlot {
    uint32_t type_and_offset;
  };
  struct Chunk {
    Chunk* next;
    std::vector<TypedSlot> buffer;
  };
  static const size_t kInitialBufferSize = 100;
  static const size_t kMaxBufferSize = 16 * KB;
  static size_t NextCapacity(size_t capacity) {
    return Min(kMaxBufferSize, capacity * 2);
  }
  Chunk* EnsureChunk();
  Chunk* NewChunk(Chunk* next, size_t capacity);
  Chunk* head_ = nullptr;
  Chunk* tail_ = nullptr;
};

// A multiset of per-page typed slots that allows concurrent iteration and
// clearing of invalid slots.
class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
 public:
  enum IterationMode { FREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };

  explicit TypedSlotSet(Address page_start) : page_start_(page_start) {}

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  //
  // Sample usage:
  // Iterate([](SlotType slot_type, Address slot_address) {
  //    if (good(slot_type, slot_address)) return KEEP_SLOT;
  //    else return REMOVE_SLOT;
  // });
  // This can run concurrently to ClearInvalidSlots().
  template <typename Callback>
  int Iterate(Callback callback, IterationMode mode) {
    STATIC_ASSERT(CLEARED_SLOT < 8);
    Chunk* chunk = head_;
    Chunk* previous = nullptr;
    int new_count = 0;
    while (chunk != nullptr) {
      bool empty = true;
      for (TypedSlot& slot : chunk->buffer) {
        SlotType type = TypeField::decode(slot.type_and_offset);
        if (type != CLEARED_SLOT) {
          uint32_t offset = OffsetField::decode(slot.type_and_offset);
          Address addr = page_start_ + offset;
          if (callback(type, addr) == KEEP_SLOT) {
            new_count++;
            empty = false;
          } else {
            slot = ClearedTypedSlot();
          }
        }
      }
      Chunk* next = chunk->next;
      if (mode == FREE_EMPTY_CHUNKS && empty) {
        // We remove the chunk from the list but let it still point to its
        // next chunk to allow concurrent iteration.
        if (previous) {
          StoreNext(previous, next);
        } else {
          StoreHead(next);
        }

        delete chunk;
      } else {
        previous = chunk;
      }
      chunk = next;
    }
    return new_count;
  }

  // Clears all slots whose offsets lie in the specified ranges.
  // This can run concurrently to Iterate().
  void ClearInvalidSlots(const std::map<uint32_t, uint32_t>& invalid_ranges);

  // Frees empty chunks accumulated by PREFREE_EMPTY_CHUNKS.
  void FreeToBeFreedChunks();

 private:
  // Atomic operations used by Iterate and ClearInvalidSlots.
  Chunk* LoadNext(Chunk* chunk) {
    return base::AsAtomicPointer::Relaxed_Load(&chunk->next);
  }
  void StoreNext(Chunk* chunk, Chunk* next) {
    return base::AsAtomicPointer::Relaxed_Store(&chunk->next, next);
  }
  Chunk* LoadHead() { return base::AsAtomicPointer::Relaxed_Load(&head_); }
  void StoreHead(Chunk* chunk) {
    base::AsAtomicPointer::Relaxed_Store(&head_, chunk);
  }
  static TypedSlot ClearedTypedSlot() {
    return TypedSlot{TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0)};
  }

  Address page_start_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SLOT_SET_H_