//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the DenseMap class.
//
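// Example usage (an illustrative sketch; the key and value types below are
// arbitrary placeholders, not anything this header requires):
//
//   llvm::DenseMap<unsigned, int> M;
//   M[42] = 7;                 // operator[] default-constructs missing values
//   if (M.count(42)) {         // 1 if the key is present, 0 otherwise
//     int V = M.lookup(42);    // copy of the mapped value, or int() if absent
//     (void)V;
//   }
//   for (const auto &KV : M)   // visits the occupied (key, value) pairs
//     (void)KV.first;
//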
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DENSEMAP_H
#define LLVM_ADT_DENSEMAP_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/EpochTracker.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstring>
#include <iterator>
#include <new>
#include <utility>

namespace llvm {

namespace detail {
// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
  KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
  const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
  ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
  const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
};
} // end namespace detail

template <
    typename KeyT, typename ValueT, typename KeyInfoT = DenseMapInfo<KeyT>,
    typename Bucket = detail::DenseMapPair<KeyT, ValueT>, bool IsConst = false>
class DenseMapIterator;

template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
class DenseMapBase : public DebugEpochBase {
public:
  typedef unsigned size_type;
  typedef KeyT key_type;
  typedef ValueT mapped_type;
  typedef BucketT value_type;

  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT> iterator;
  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>
      const_iterator;
  inline iterator begin() {
    // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
    return empty() ? end() : iterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline iterator end() {
    return iterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }
  inline const_iterator begin() const {
    return empty() ? end()
                   : const_iterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline const_iterator end() const {
    return const_iterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }

  bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const {
    return getNumEntries() == 0;
  }
  unsigned size() const { return getNumEntries(); }

  /// Grow the DenseMap so that it has at least Size buckets. Does not shrink.
  void resize(size_type Size) {
    incrementEpoch();
    if (Size > getNumBuckets())
      grow(Size);
  }

  void clear() {
    incrementEpoch();
    if (getNumEntries() == 0 && getNumTombstones() == 0) return;

    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
      shrink_and_clear();
      return;
    }

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    unsigned NumEntries = getNumEntries();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
        if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
          P->getSecond().~ValueT();
          --NumEntries;
        }
        P->getFirst() = EmptyKey;
      }
    }
    assert(NumEntries == 0 && "Node count imbalance!");
    setNumEntries(0);
    setNumTombstones(0);
  }

  /// Return 1 if the specified key is in the map, 0 otherwise.
  size_type count(const KeyT &Val) const {
    const BucketT *TheBucket;
    return LookupBucketFor(Val, TheBucket) ? 1 : 0;
  }

  iterator find(const KeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }
  const_iterator find(const KeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template<class LookupKeyT>
  iterator find_as(const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }
  template<class LookupKeyT>
  const_iterator find_as(const LookupKeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }
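  // An illustrative sketch (StringKeyInfo below is hypothetical, not provided
  // by this header): a map keyed by std::string could be probed with a
  // StringRef via find_as(), avoiding a temporary std::string, provided its
  // key-info type supplies the usual DenseMapInfo members for std::string
  // plus:
  //
  //   static unsigned getHashValue(StringRef Key);
  //   static bool isEqual(StringRef LHS, const std::string &RHS);
  //
  //   DenseMap<std::string, int, StringKeyInfo> M;
  //   auto It = M.find_as(StringRef("key"));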

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const KeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return TheBucket->getSecond();
    return ValueT();
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    BucketT *TheBucket;
    if (LookupBucketFor(KV.first, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                          true);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
    BucketT *TheBucket;
    if (LookupBucketFor(KV.first, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(std::move(KV.first),
                                 std::move(KV.second),
                                 TheBucket);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                          true);
  }

  /// insert - Range insertion of pairs.
  template<typename InputIt>
  void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }


  bool erase(const KeyT &Val) {
    BucketT *TheBucket;
    if (!LookupBucketFor(Val, TheBucket))
      return false; // not in map.

    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }
  void erase(iterator I) {
    BucketT *TheBucket = &*I;
    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }

  value_type& FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(Key, ValueT(), TheBucket);
  }

  ValueT &operator[](const KeyT &Key) {
    return FindAndConstruct(Key).second;
  }

  value_type& FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(std::move(Key), ValueT(), TheBucket);
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(std::move(Key)).second;
  }

  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  /// value in the DenseMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array.  In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
  const void *getPointerIntoBucketsArray() const { return getBuckets(); }
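  // An illustrative sketch of the intended use (M, Key, and Val are
  // placeholders):
  //
  //   const void *OldArray = M.getPointerIntoBucketsArray();
  //   M.insert(std::make_pair(Key, Val));
  //   bool Grew = !M.isPointerIntoBucketsArray(OldArray);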

protected:
  DenseMapBase() = default;

  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
        P->getSecond().~ValueT();
      P->getFirst().~KeyT();
    }
  }

  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      ::new (&B->getFirst()) KeyT(EmptyKey);
  }

  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
        (void)FoundVal; // silence warning.
        assert(!FoundVal && "Key already in new map?");
        DestBucket->getFirst() = std::move(B->getFirst());
        ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
        incrementNumEntries();

        // Free the value.
        B->getSecond().~ValueT();
      }
      B->getFirst().~KeyT();
    }
  }

  template <typename OtherBaseT>
  void copyFrom(
      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
    assert(&other != this);
    assert(getNumBuckets() == other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
      memcpy(getBuckets(), other.getBuckets(),
             getNumBuckets() * sizeof(BucketT));
    else
      for (size_t i = 0; i < getNumBuckets(); ++i) {
        ::new (&getBuckets()[i].getFirst())
            KeyT(other.getBuckets()[i].getFirst());
        if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
            !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
          ::new (&getBuckets()[i].getSecond())
              ValueT(other.getBuckets()[i].getSecond());
      }
  }

  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }
  template<typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }
  static const KeyT getEmptyKey() {
    return KeyInfoT::getEmptyKey();
  }
  static const KeyT getTombstoneKey() {
    return KeyInfoT::getTombstoneKey();
  }

private:
  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }
  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }
  void incrementNumEntries() {
    setNumEntries(getNumEntries() + 1);
  }
  void decrementNumEntries() {
    setNumEntries(getNumEntries() - 1);
  }
  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }
  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }
  void incrementNumTombstones() {
    setNumTombstones(getNumTombstones() + 1);
  }
  void decrementNumTombstones() {
    setNumTombstones(getNumTombstones() - 1);
  }
  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }
  BucketT *getBuckets() {
    return static_cast<DerivedT *>(this)->getBuckets();
  }
  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }
  BucketT *getBucketsEnd() {
    return getBuckets() + getNumBuckets();
  }
  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) {
    static_cast<DerivedT *>(this)->grow(AtLeast);
  }

  void shrink_and_clear() {
    static_cast<DerivedT *>(this)->shrink_and_clear();
  }


  BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value,
                            BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->getFirst() = Key;
    ::new (&TheBucket->getSecond()) ValueT(Value);
    return TheBucket;
  }

  BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value,
                            BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->getFirst() = Key;
    ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
    return TheBucket;
  }

  BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->getFirst() = std::move(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
    return TheBucket;
  }

  BucketT *InsertIntoBucketImpl(const KeyT &Key, BucketT *TheBucket) {
    incrementEpoch();

    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The latter case is tricky.  For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket.  If the
    // table were completely filled with tombstones, no lookup would ever
    // succeed, causing infinite loops in lookup.
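    //
    // For example, with 64 buckets the first condition below triggers when
    // the 48th entry is about to be inserted (48 * 4 >= 64 * 3), i.e. at
    // three-quarters load, and the table grows to 128 buckets.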
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Key, TheBucket);
      NumBuckets = getNumBuckets();
    } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
                             NumBuckets/8)) {
      this->grow(NumBuckets);
      LookupBucketFor(Key, TheBucket);
    }
    assert(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    const KeyT EmptyKey = getEmptyKey();
    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
      decrementNumTombstones();

    return TheBucket;
  }

  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket.  If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template<typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    const BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
    unsigned ProbeAmt = 1;
    while (1) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket?  If so, return it.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the map.
      // Return the bucket where it would be inserted.
      if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it.  If Val ends up not in the map,
      // we prefer to return it rather than something that would require more
      // probing.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket;  // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone; continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets-1);
    }
  }

  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)
                      ->LookupBucketFor(Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }

public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// are not included.
  size_t getMemorySize() const {
    return getNumBuckets() * sizeof(BucketT);
  }
};

template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
                                     KeyT, ValueT, KeyInfoT, BucketT> {
  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  BucketT *Buckets;
  unsigned NumEntries;
  unsigned NumTombstones;
  unsigned NumBuckets;

public:
  explicit DenseMap(unsigned NumInitBuckets = 0) {
    init(NumInitBuckets);
  }

  DenseMap(const DenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  DenseMap(DenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template<typename InputIt>
  DenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  ~DenseMap() {
    this->destroyAll();
    operator delete(Buckets);
  }

  void swap(DenseMap& RHS) {
    this->incrementEpoch();
    RHS.incrementEpoch();
    std::swap(Buckets, RHS.Buckets);
    std::swap(NumEntries, RHS.NumEntries);
    std::swap(NumTombstones, RHS.NumTombstones);
    std::swap(NumBuckets, RHS.NumBuckets);
  }

  DenseMap& operator=(const DenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  DenseMap& operator=(DenseMap &&other) {
    this->destroyAll();
    operator delete(Buckets);
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const DenseMap& other) {
    this->destroyAll();
    operator delete(Buckets);
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void init(unsigned InitBuckets) {
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    allocateBuckets(std::max<unsigned>(
        64, static_cast<unsigned>(NextPowerOf2(AtLeast - 1))));
    assert(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);

    // Free the old table.
    operator delete(OldBuckets);
  }

  void shrink_and_clear() {
    unsigned OldNumEntries = NumEntries;
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldNumEntries)
      NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
    if (NewNumBuckets == NumBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    operator delete(Buckets);
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }
  void setNumEntries(unsigned Num) {
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  BucketT *getBuckets() const {
    return Buckets;
  }

  unsigned getNumBuckets() const {
    return NumBuckets;
  }

  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = nullptr;
      return false;
    }

    Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
    return true;
  }
};

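/// SmallDenseMap - A DenseMap variant with a small-size optimization: up to
/// InlineBuckets buckets are stored inline within the map object itself, and
/// a separate bucket array is only heap-allocated once the map grows beyond
/// that. The 'Small' bit below discriminates between the two representations.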
template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
class SmallDenseMap
    : public DenseMapBase<
          SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
          ValueT, KeyInfoT, BucketT> {
  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  unsigned Small : 1;
  unsigned NumEntries : 31;
  unsigned NumTombstones;

  struct LargeRep {
    BucketT *Buckets;
    unsigned NumBuckets;
  };

  /// A "union" of an inline bucket array and the struct representing
  /// a large bucket. This union will be discriminated by the 'Small' bit.
  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;

public:
  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
    init(NumInitBuckets);
  }

  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template<typename InputIt>
  SmallDenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  ~SmallDenseMap() {
    this->destroyAll();
    deallocateBuckets();
  }

  void swap(SmallDenseMap& RHS) {
    unsigned TmpNumEntries = RHS.NumEntries;
    RHS.NumEntries = NumEntries;
    NumEntries = TmpNumEntries;
    std::swap(NumTombstones, RHS.NumTombstones);

    const KeyT EmptyKey = this->getEmptyKey();
    const KeyT TombstoneKey = this->getTombstoneKey();
    if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: the buckets are not
      // fully initialized. Thus we swap every key, but we may have
      // a one-directional move of the value.
      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
        BucketT *LHSB = &getInlineBuckets()[i],
                *RHSB = &RHS.getInlineBuckets()[i];
        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
        if (hasLHSValue && hasRHSValue) {
          // Swap together if we can...
          std::swap(*LHSB, *RHSB);
          continue;
        }
        // Swap separately and handle any asymmetry.
        std::swap(LHSB->getFirst(), RHSB->getFirst());
        if (hasLHSValue) {
          ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
          LHSB->getSecond().~ValueT();
        } else if (hasRHSValue) {
          ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
          RHSB->getSecond().~ValueT();
        }
      }
      return;
    }
    if (!Small && !RHS.Small) {
      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
      return;
    }

    SmallDenseMap &SmallSide = Small ? *this : RHS;
    SmallDenseMap &LargeSide = Small ? RHS : *this;

    // First stash the large side's rep and move the small side across.
    LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
    LargeSide.getLargeRep()->~LargeRep();
    LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually rotated in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
              *OldB = &SmallSide.getInlineBuckets()[i];
      ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
      OldB->getFirst().~KeyT();
      if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
        ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
        OldB->getSecond().~ValueT();
      }
    }

    // The hard part of moving the small buckets across is done, just move
    // the TmpRep into its new home.
    SmallSide.Small = false;
    new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
  }

  SmallDenseMap& operator=(const SmallDenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  SmallDenseMap& operator=(SmallDenseMap &&other) {
    this->destroyAll();
    deallocateBuckets();
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const SmallDenseMap& other) {
    this->destroyAll();
    deallocateBuckets();
    Small = true;
    if (other.getNumBuckets() > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
    }
    this->BaseT::copyFrom(other);
  }

  void init(unsigned InitBuckets) {
    Small = true;
    if (InitBuckets > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
    }
    this->BaseT::initEmpty();
  }

  void grow(unsigned AtLeast) {
    if (AtLeast >= InlineBuckets)
      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));

    if (Small) {
      if (AtLeast < InlineBuckets)
        return; // Nothing to do.

      // First move the inline buckets into a temporary storage.
      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
      BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
      BucketT *TmpEnd = TmpBegin;

      // Loop over the buckets, moving non-empty, non-tombstones into the
      // temporary storage. Have the loop move the TmpEnd forward as it goes.
      const KeyT EmptyKey = this->getEmptyKey();
      const KeyT TombstoneKey = this->getTombstoneKey();
      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
            !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
                 "Too many inline buckets!");
          ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
          ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
          ++TmpEnd;
          P->getSecond().~ValueT();
        }
        P->getFirst().~KeyT();
      }

      // Now make this map use the large rep, and move all the entries back
      // into it.
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
      this->moveFromOldBuckets(TmpBegin, TmpEnd);
      return;
    }

    LargeRep OldRep = std::move(*getLargeRep());
    getLargeRep()->~LargeRep();
    if (AtLeast <= InlineBuckets) {
      Small = true;
    } else {
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
    }

    this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);

    // Free the old table.
    operator delete(OldRep.Buckets);
  }

  void shrink_and_clear() {
    unsigned OldSize = this->size();
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldSize) {
      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
        NewNumBuckets = 64;
    }
    if ((Small && NewNumBuckets <= InlineBuckets) ||
        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
      this->BaseT::initEmpty();
      return;
    }

    deallocateBuckets();
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }
  void setNumEntries(unsigned Num) {
    assert(Num < INT_MAX && "Cannot support more than INT_MAX entries");
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage.buffer' static type is 'char *'.
    return reinterpret_cast<const BucketT *>(storage.buffer);
  }
  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }
  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(storage.buffer);
  }
  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
        const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }

  const BucketT *getBuckets() const {
    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  }
  BucketT *getBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getBuckets());
  }
  unsigned getNumBuckets() const {
    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
  }

  void deallocateBuckets() {
    if (Small)
      return;

    operator delete(getLargeRep()->Buckets);
    getLargeRep()->~LargeRep();
  }

  LargeRep allocateBuckets(unsigned Num) {
    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
    LargeRep Rep = {
      static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
    };
    return Rep;
  }
};

template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
          bool IsConst>
class DenseMapIterator : DebugEpochBase::HandleBase {
  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true> ConstIterator;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;

public:
  typedef ptrdiff_t difference_type;
  typedef typename std::conditional<IsConst, const Bucket, Bucket>::type
      value_type;
  typedef value_type *pointer;
  typedef value_type &reference;
  typedef std::forward_iterator_tag iterator_category;
private:
  pointer Ptr, End;
public:
  DenseMapIterator() : Ptr(nullptr), End(nullptr) {}

  DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
                   bool NoAdvance = false)
      : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
    assert(isHandleInSync() && "invalid construction!");
    if (!NoAdvance) AdvancePastEmptyBuckets();
  }

  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
  // for const iterator destinations so it doesn't end up as a user defined copy
  // constructor.
  template <bool IsConstSrc,
            typename = typename std::enable_if<!IsConstSrc && IsConst>::type>
  DenseMapIterator(
      const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
      : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}

  reference operator*() const {
    assert(isHandleInSync() && "invalid iterator access!");
    return *Ptr;
  }
  pointer operator->() const {
    assert(isHandleInSync() && "invalid iterator access!");
    return Ptr;
  }

  bool operator==(const ConstIterator &RHS) const {
    assert((!Ptr || isHandleInSync()) && "handle not in sync!");
    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
    assert(getEpochAddress() == RHS.getEpochAddress() &&
           "comparing incomparable iterators!");
    return Ptr == RHS.Ptr;
  }
  bool operator!=(const ConstIterator &RHS) const {
    assert((!Ptr || isHandleInSync()) && "handle not in sync!");
    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
    assert(getEpochAddress() == RHS.getEpochAddress() &&
           "comparing incomparable iterators!");
    return Ptr != RHS.Ptr;
  }

  inline DenseMapIterator& operator++() { // Preincrement
    assert(isHandleInSync() && "invalid iterator access!");
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }
  DenseMapIterator operator++(int) { // Postincrement
    assert(isHandleInSync() && "invalid iterator access!");
    DenseMapIterator tmp = *this; ++*this; return tmp;
  }

private:
  void AdvancePastEmptyBuckets() {
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
      ++Ptr;
  }
};

template<typename KeyT, typename ValueT, typename KeyInfoT>
static inline size_t
capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
  return X.getMemorySize();
}

} // end namespace llvm

#endif