DenseMap.h
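A minimal usage sketch (illustrative only, not part of this header; it assumes the header is reachable as "llvm/DenseMap.h" in this distribution and that DenseMapInfo<int> is available, as it is in stock LLVM):

    #include "llvm/DenseMap.h"
    #include <cstdio>
    #include <utility>

    void example() {
      // DenseMap: quadratically probed hash table keyed via DenseMapInfo<int>.
      llvm::DenseMap<int, const char *> Names;
      Names.insert(std::make_pair(1, "one"));
      Names[2] = "two";                        // operator[] default-constructs, then assigns
      if (Names.count(1))
        std::printf("%s\n", Names.lookup(1));  // prints "one"
      Names.erase(2);

      // SmallDenseMap: keeps up to InlineBuckets (here 4) buckets inline
      // before switching to a heap allocation.
      llvm::SmallDenseMap<int, int, 4> Squares;
      for (int i = 1; i <= 8; ++i)
        Squares[i] = i * i;
      for (const auto &KV : Squares)
        std::printf("%d -> %d\n", KV.first, KV.second);
    }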
1 //===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the DenseMap class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_ADT_DENSEMAP_H
15 #define LLVM_ADT_DENSEMAP_H
16 
17 #include "llvm/DenseMapInfo.h"
18 #include "llvm/EpochTracker.h"
19 #include "llvm/AlignOf.h"
20 #include "llvm/Compiler.h"
21 #include "llvm/MathExtras.h"
22 #include "llvm/PointerLikeTypeTraits.h"
23 #include "llvm/type_traits.h"
24 #include <algorithm>
25 #include <cassert>
26 #include <climits>
27 #include <cstddef>
28 #include <cstring>
29 #include <iterator>
30 #include <new>
31 #include <utility>
32 
33 namespace llvm {
34 
35 namespace detail {
36 // We extend a pair to allow users to override the bucket type with their own
37 // implementation without requiring two members.
38 template <typename KeyT, typename ValueT>
39 struct DenseMapPair : public std::pair<KeyT, ValueT> {
40  KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
41  const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
42  ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
43  const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
44 };
45 }
46 
47 template <
48  typename KeyT, typename ValueT, typename KeyInfoT = DenseMapInfo<KeyT>,
49  typename Bucket = detail::DenseMapPair<KeyT, ValueT>, bool IsConst = false>
50 class DenseMapIterator;
51 
52 template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
53  typename BucketT>
54 class DenseMapBase : public DebugEpochBase {
55 public:
56  typedef unsigned size_type;
57  typedef KeyT key_type;
58  typedef ValueT mapped_type;
59  typedef BucketT value_type;
60 
61  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT> iterator;
62  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>
63  const_iterator;
64  inline iterator begin() {
65  // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
66  return empty() ? end() : iterator(getBuckets(), getBucketsEnd(), *this);
67  }
68  inline iterator end() {
69  return iterator(getBucketsEnd(), getBucketsEnd(), *this, true);
70  }
71  inline const_iterator begin() const {
72  return empty() ? end()
73  : const_iterator(getBuckets(), getBucketsEnd(), *this);
74  }
75  inline const_iterator end() const {
76  return const_iterator(getBucketsEnd(), getBucketsEnd(), *this, true);
77  }
78 
79  bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const {
80  return getNumEntries() == 0;
81  }
82  unsigned size() const { return getNumEntries(); }
83 
84  /// Grow the densemap so that it can contain at least \p NumEntries items
85  /// before resizing again.
86  void reserve(size_type NumEntries) {
87  auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
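 // (For example, reserve(48) computes 128 buckets here, enough that 48
 // subsequent insertions never trigger grow(); illustrative numbers derived
 // from the 3/4 maximum load factor.)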
88  incrementEpoch();
89  if (NumBuckets > getNumBuckets())
90  grow(NumBuckets);
91  }
92 
93  void clear() {
94  incrementEpoch();
95  if (getNumEntries() == 0 && getNumTombstones() == 0) return;
96 
97  // If the capacity of the array is huge, and the # elements used is small,
98  // shrink the array.
99  if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
100  shrink_and_clear();
101  return;
102  }
103 
104  const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
105  unsigned NumEntries = getNumEntries();
106  for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
107  if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
108  if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
109  P->getSecond().~ValueT();
110  --NumEntries;
111  }
112  P->getFirst() = EmptyKey;
113  }
114  }
115  assert(NumEntries == 0 && "Node count imbalance!");
116  setNumEntries(0);
117  setNumTombstones(0);
118  }
119 
120  /// Return 1 if the specified key is in the map, 0 otherwise.
121  size_type count(const KeyT &Val) const {
122  const BucketT *TheBucket;
123  return LookupBucketFor(Val, TheBucket) ? 1 : 0;
124  }
125 
126  iterator find(const KeyT &Val) {
127  BucketT *TheBucket;
128  if (LookupBucketFor(Val, TheBucket))
129  return iterator(TheBucket, getBucketsEnd(), *this, true);
130  return end();
131  }
132  const_iterator find(const KeyT &Val) const {
133  const BucketT *TheBucket;
134  if (LookupBucketFor(Val, TheBucket))
135  return const_iterator(TheBucket, getBucketsEnd(), *this, true);
136  return end();
137  }
138 
139  /// Alternate version of find() which allows a different, and possibly
140  /// less expensive, key type.
141  /// The DenseMapInfo is responsible for supplying methods
142  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
143  /// type used.
144  template<class LookupKeyT>
145  iterator find_as(const LookupKeyT &Val) {
146  BucketT *TheBucket;
147  if (LookupBucketFor(Val, TheBucket))
148  return iterator(TheBucket, getBucketsEnd(), *this, true);
149  return end();
150  }
151  template<class LookupKeyT>
152  const_iterator find_as(const LookupKeyT &Val) const {
153  const BucketT *TheBucket;
154  if (LookupBucketFor(Val, TheBucket))
155  return const_iterator(TheBucket, getBucketsEnd(), *this, true);
156  return end();
157  }
158 
159  /// lookup - Return the entry for the specified key, or a default
160  /// constructed value if no such entry exists.
161  ValueT lookup(const KeyT &Val) const {
162  const BucketT *TheBucket;
163  if (LookupBucketFor(Val, TheBucket))
164  return TheBucket->getSecond();
165  return ValueT();
166  }
167 
168  // Inserts key,value pair into the map if the key isn't already in the map.
169  // If the key is already in the map, it returns false and doesn't update the
170  // value.
171  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
172  BucketT *TheBucket;
173  if (LookupBucketFor(KV.first, TheBucket))
174  return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
175  false); // Already in map.
176 
177  // Otherwise, insert the new element.
178  TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket);
179  return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
180  true);
181  }
182 
183  // Inserts key,value pair into the map if the key isn't already in the map.
184  // If the key is already in the map, it returns false and doesn't update the
185  // value.
186  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
187  BucketT *TheBucket;
188  if (LookupBucketFor(KV.first, TheBucket))
189  return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
190  false); // Already in map.
191 
192  // Otherwise, insert the new element.
193  TheBucket = InsertIntoBucket(std::move(KV.first),
194  std::move(KV.second),
195  TheBucket);
196  return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
197  true);
198  }
199 
200  /// Alternate version of insert() which allows a different, and possibly
201  /// less expensive, key type.
202  /// The DenseMapInfo is responsible for supplying methods
203  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
204  /// type used.
205  template <typename LookupKeyT>
206  std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
207  const LookupKeyT &Val) {
208  BucketT *TheBucket;
209  if (LookupBucketFor(Val, TheBucket))
210  return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
211  false); // Already in map.
212 
213  // Otherwise, insert the new element.
214  TheBucket = InsertIntoBucket(std::move(KV.first), std::move(KV.second), Val,
215  TheBucket);
216  return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
217  true);
218  }
219 
220  /// insert - Range insertion of pairs.
221  template<typename InputIt>
222  void insert(InputIt I, InputIt E) {
223  for (; I != E; ++I)
224  insert(*I);
225  }
226 
227 
228  bool erase(const KeyT &Val) {
229  BucketT *TheBucket;
230  if (!LookupBucketFor(Val, TheBucket))
231  return false; // not in map.
232 
233  TheBucket->getSecond().~ValueT();
234  TheBucket->getFirst() = getTombstoneKey();
235  decrementNumEntries();
236  incrementNumTombstones();
237  return true;
238  }
239  void erase(iterator I) {
240  BucketT *TheBucket = &*I;
241  TheBucket->getSecond().~ValueT();
242  TheBucket->getFirst() = getTombstoneKey();
243  decrementNumEntries();
244  incrementNumTombstones();
245  }
246 
247  value_type& FindAndConstruct(const KeyT &Key) {
248  BucketT *TheBucket;
249  if (LookupBucketFor(Key, TheBucket))
250  return *TheBucket;
251 
252  return *InsertIntoBucket(Key, ValueT(), TheBucket);
253  }
254 
255  ValueT &operator[](const KeyT &Key) {
256  return FindAndConstruct(Key).second;
257  }
258 
259  value_type& FindAndConstruct(KeyT &&Key) {
260  BucketT *TheBucket;
261  if (LookupBucketFor(Key, TheBucket))
262  return *TheBucket;
263 
264  return *InsertIntoBucket(std::move(Key), ValueT(), TheBucket);
265  }
266 
267  ValueT &operator[](KeyT &&Key) {
268  return FindAndConstruct(std::move(Key)).second;
269  }
270 
271  /// isPointerIntoBucketsArray - Return true if the specified pointer points
272  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
273  /// value in the DenseMap).
274  bool isPointerIntoBucketsArray(const void *Ptr) const {
275  return Ptr >= getBuckets() && Ptr < getBucketsEnd();
276  }
277 
278  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
279  /// array. In conjunction with the previous method, this can be used to
280  /// determine whether an insertion caused the DenseMap to reallocate.
281  const void *getPointerIntoBucketsArray() const { return getBuckets(); }
282 
283 protected:
284  DenseMapBase() = default;
285 
286  void destroyAll() {
287  if (getNumBuckets() == 0) // Nothing to do.
288  return;
289 
290  const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
291  for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
292  if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
293  !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
294  P->getSecond().~ValueT();
295  P->getFirst().~KeyT();
296  }
297  }
298 
299  void initEmpty() {
300  setNumEntries(0);
301  setNumTombstones(0);
302 
303  assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
304  "# initial buckets must be a power of two!");
305  const KeyT EmptyKey = getEmptyKey();
306  for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
307  ::new (&B->getFirst()) KeyT(EmptyKey);
308  }
309 
310  /// Returns the number of buckets to allocate to ensure that the DenseMap
311  /// can accommodate \p NumEntries without need to grow().
312  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
313  // Ensure that "NumEntries * 4 < NumBuckets * 3"
314  if (NumEntries == 0)
315  return 0;
316  // +1 is required because of the strict equality.
317  // For example, if NumEntries is 48 we need strictly more than 64 buckets,
318  // so this returns 128.
318  return NextPowerOf2(NumEntries * 4 / 3 + 1);
319  }
320 
321  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
322  initEmpty();
323 
324  // Insert all the old elements.
325  const KeyT EmptyKey = getEmptyKey();
326  const KeyT TombstoneKey = getTombstoneKey();
327  for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
328  if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
329  !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
330  // Insert the key/value into the new table.
331  BucketT *DestBucket;
332  bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
333  (void)FoundVal; // silence warning.
334  assert(!FoundVal && "Key already in new map?");
335  DestBucket->getFirst() = std::move(B->getFirst());
336  ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
337  incrementNumEntries();
338 
339  // Free the value.
340  B->getSecond().~ValueT();
341  }
342  B->getFirst().~KeyT();
343  }
344  }
345 
346  template <typename OtherBaseT>
347  void copyFrom(
348  const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
349  assert(&other != this);
350  assert(getNumBuckets() == other.getNumBuckets());
351 
352  setNumEntries(other.getNumEntries());
353  setNumTombstones(other.getNumTombstones());
354 
355  if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
356  memcpy(getBuckets(), other.getBuckets(),
357  getNumBuckets() * sizeof(BucketT));
358  else
359  for (size_t i = 0; i < getNumBuckets(); ++i) {
360  ::new (&getBuckets()[i].getFirst())
361  KeyT(other.getBuckets()[i].getFirst());
362  if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
363  !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
364  ::new (&getBuckets()[i].getSecond())
365  ValueT(other.getBuckets()[i].getSecond());
366  }
367  }
368 
369  static unsigned getHashValue(const KeyT &Val) {
370  return KeyInfoT::getHashValue(Val);
371  }
372  template<typename LookupKeyT>
373  static unsigned getHashValue(const LookupKeyT &Val) {
374  return KeyInfoT::getHashValue(Val);
375  }
376  static const KeyT getEmptyKey() {
377  return KeyInfoT::getEmptyKey();
378  }
379  static const KeyT getTombstoneKey() {
380  return KeyInfoT::getTombstoneKey();
381  }
382 
383 private:
384  unsigned getNumEntries() const {
385  return static_cast<const DerivedT *>(this)->getNumEntries();
386  }
387  void setNumEntries(unsigned Num) {
388  static_cast<DerivedT *>(this)->setNumEntries(Num);
389  }
390  void incrementNumEntries() {
391  setNumEntries(getNumEntries() + 1);
392  }
393  void decrementNumEntries() {
394  setNumEntries(getNumEntries() - 1);
395  }
396  unsigned getNumTombstones() const {
397  return static_cast<const DerivedT *>(this)->getNumTombstones();
398  }
399  void setNumTombstones(unsigned Num) {
400  static_cast<DerivedT *>(this)->setNumTombstones(Num);
401  }
402  void incrementNumTombstones() {
403  setNumTombstones(getNumTombstones() + 1);
404  }
405  void decrementNumTombstones() {
406  setNumTombstones(getNumTombstones() - 1);
407  }
408  const BucketT *getBuckets() const {
409  return static_cast<const DerivedT *>(this)->getBuckets();
410  }
411  BucketT *getBuckets() {
412  return static_cast<DerivedT *>(this)->getBuckets();
413  }
414  unsigned getNumBuckets() const {
415  return static_cast<const DerivedT *>(this)->getNumBuckets();
416  }
417  BucketT *getBucketsEnd() {
418  return getBuckets() + getNumBuckets();
419  }
420  const BucketT *getBucketsEnd() const {
421  return getBuckets() + getNumBuckets();
422  }
423 
424  void grow(unsigned AtLeast) {
425  static_cast<DerivedT *>(this)->grow(AtLeast);
426  }
427 
428  void shrink_and_clear() {
429  static_cast<DerivedT *>(this)->shrink_and_clear();
430  }
431 
432 
433  BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value,
434  BucketT *TheBucket) {
435  TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
436 
437  TheBucket->getFirst() = Key;
438  ::new (&TheBucket->getSecond()) ValueT(Value);
439  return TheBucket;
440  }
441 
442  BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value,
443  BucketT *TheBucket) {
444  TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
445 
446  TheBucket->getFirst() = Key;
447  ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
448  return TheBucket;
449  }
450 
451  BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) {
452  TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
453 
454  TheBucket->getFirst() = std::move(Key);
455  ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
456  return TheBucket;
457  }
458 
459  template <typename LookupKeyT>
460  BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, LookupKeyT &Lookup,
461  BucketT *TheBucket) {
462  TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
463 
464  TheBucket->getFirst() = std::move(Key);
465  ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
466  return TheBucket;
467  }
468 
469  template <typename LookupKeyT>
470  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
471  BucketT *TheBucket) {
472  incrementEpoch();
473 
474  // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
475  // the buckets are empty (meaning that many are filled with tombstones),
476  // grow the table.
477  //
478  // The latter case is tricky. For example, if we had one empty bucket with
479  // tons of tombstones, failing lookups (e.g. for insertion) would have to
480  // probe almost the entire table until they found the empty bucket. If the
481  // table were completely filled with tombstones, no lookup would ever
482  // succeed, causing infinite loops in lookup.
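 // (Illustration: with 64 buckets, the first condition grows the table on the
 // insertion that would bring it to 48 entries, since 48*4 >= 64*3; the second
 // condition triggers once no more than 64/8 = 8 buckets remain empty, and
 // grow(NumBuckets) then re-hashes into a table of the same size purely to
 // reclaim tombstones.)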
483  unsigned NewNumEntries = getNumEntries() + 1;
484  unsigned NumBuckets = getNumBuckets();
485  if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
486  this->grow(NumBuckets * 2);
487  LookupBucketFor(Lookup, TheBucket);
488  NumBuckets = getNumBuckets();
489  } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
490  NumBuckets/8)) {
491  this->grow(NumBuckets);
492  LookupBucketFor(Lookup, TheBucket);
493  }
494  assert(TheBucket);
495 
496  // Only update the state after we've grown our bucket space appropriately
497  // so that when growing buckets we have self-consistent entry count.
498  incrementNumEntries();
499 
500  // If we are writing over a tombstone, remember this.
501  const KeyT EmptyKey = getEmptyKey();
502  if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
503  decrementNumTombstones();
504 
505  return TheBucket;
506  }
507 
508  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
509  /// FoundBucket. If the bucket contains the key and a value, this returns
510  /// true, otherwise it returns a bucket with an empty marker or tombstone
511  /// and returns false.
512  template<typename LookupKeyT>
513  bool LookupBucketFor(const LookupKeyT &Val,
514  const BucketT *&FoundBucket) const {
515  const BucketT *BucketsPtr = getBuckets();
516  const unsigned NumBuckets = getNumBuckets();
517 
518  if (NumBuckets == 0) {
519  FoundBucket = nullptr;
520  return false;
521  }
522 
523  // FoundTombstone - Keep track of whether we find a tombstone while probing.
524  const BucketT *FoundTombstone = nullptr;
525  const KeyT EmptyKey = getEmptyKey();
526  const KeyT TombstoneKey = getTombstoneKey();
527  assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
528  !KeyInfoT::isEqual(Val, TombstoneKey) &&
529  "Empty/Tombstone value shouldn't be inserted into map!");
530 
531  unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
532  unsigned ProbeAmt = 1;
533  while (1) {
534  const BucketT *ThisBucket = BucketsPtr + BucketNo;
535  // Found Val's bucket? If so, return it.
536  if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
537  FoundBucket = ThisBucket;
538  return true;
539  }
540 
541  // If we found an empty bucket, the key doesn't exist in the map. Report
542  // the bucket an insertion should use.
543  if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
544  // If we've already seen a tombstone while probing, fill it in instead
545  // of the empty bucket we eventually probed to.
546  FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
547  return false;
548  }
549 
550  // If this is a tombstone, remember it. If Val ends up not in the map, we
551  // prefer to return it rather than something that would require more probing.
552  if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
553  !FoundTombstone)
554  FoundTombstone = ThisBucket; // Remember the first tombstone found.
555 
556  // Otherwise, it's a hash collision or a tombstone, continue quadratic
557  // probing.
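 // (The cumulative probe offsets 1, 3, 6, 10, ... are triangular numbers;
 // with a power-of-two bucket count this sequence visits every bucket, so the
 // loop terminates as long as at least one empty bucket remains, which the
 // growth policy above guarantees.)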
558  BucketNo += ProbeAmt++;
559  BucketNo &= (NumBuckets-1);
560  }
561  }
562 
563  template <typename LookupKeyT>
564  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
565  const BucketT *ConstFoundBucket;
566  bool Result = const_cast<const DenseMapBase *>(this)
567  ->LookupBucketFor(Val, ConstFoundBucket);
568  FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
569  return Result;
570  }
571 
572 public:
573  /// Return the approximate size (in bytes) of the actual map.
574  /// This is just the raw memory used by DenseMap.
575  /// If entries are pointers to objects, the size of the referenced objects
576  /// are not included.
577  size_t getMemorySize() const {
578  return getNumBuckets() * sizeof(BucketT);
579  }
580 };
581 
582 template <typename KeyT, typename ValueT,
583  typename KeyInfoT = DenseMapInfo<KeyT>,
584  typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
585 class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
586  KeyT, ValueT, KeyInfoT, BucketT> {
587  // Lift some types from the dependent base class into this class for
588  // simplicity of referring to them.
589  typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
590  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
591 
592  BucketT *Buckets;
593  unsigned NumEntries;
594  unsigned NumTombstones;
595  unsigned NumBuckets;
596 
597 public:
598  /// Create a DenseMap with an optional \p InitialReserve that guarantees
599  /// this number of elements can be inserted in the map without growing.
600  explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
601 
602  DenseMap(const DenseMap &other) : BaseT() {
603  init(0);
604  copyFrom(other);
605  }
606 
607  DenseMap(DenseMap &&other) : BaseT() {
608  init(0);
609  swap(other);
610  }
611 
612  template<typename InputIt>
613  DenseMap(const InputIt &I, const InputIt &E) {
614  init(std::distance(I, E));
615  this->insert(I, E);
616  }
617 
618  ~DenseMap() {
619  this->destroyAll();
620  operator delete(Buckets);
621  }
622 
623  void swap(DenseMap& RHS) {
624  this->incrementEpoch();
625  RHS.incrementEpoch();
626  std::swap(Buckets, RHS.Buckets);
627  std::swap(NumEntries, RHS.NumEntries);
628  std::swap(NumTombstones, RHS.NumTombstones);
629  std::swap(NumBuckets, RHS.NumBuckets);
630  }
631 
632  DenseMap& operator=(const DenseMap& other) {
633  if (&other != this)
634  copyFrom(other);
635  return *this;
636  }
637 
638  DenseMap& operator=(DenseMap &&other) {
639  this->destroyAll();
640  operator delete(Buckets);
641  init(0);
642  swap(other);
643  return *this;
644  }
645 
646  void copyFrom(const DenseMap& other) {
647  this->destroyAll();
648  operator delete(Buckets);
649  if (allocateBuckets(other.NumBuckets)) {
650  this->BaseT::copyFrom(other);
651  } else {
652  NumEntries = 0;
653  NumTombstones = 0;
654  }
655  }
656 
657  void init(unsigned InitNumEntries) {
658  auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
659  if (allocateBuckets(InitBuckets)) {
660  this->BaseT::initEmpty();
661  } else {
662  NumEntries = 0;
663  NumTombstones = 0;
664  }
665  }
666 
667  void grow(unsigned AtLeast) {
668  unsigned OldNumBuckets = NumBuckets;
669  BucketT *OldBuckets = Buckets;
670 
671  allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
672  assert(Buckets);
673  if (!OldBuckets) {
674  this->BaseT::initEmpty();
675  return;
676  }
677 
678  this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
679 
680  // Free the old table.
681  operator delete(OldBuckets);
682  }
683 
684  void shrink_and_clear() {
685  unsigned OldNumEntries = NumEntries;
686  this->destroyAll();
687 
688  // Reduce the number of buckets.
689  unsigned NewNumBuckets = 0;
690  if (OldNumEntries)
691  NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
692  if (NewNumBuckets == NumBuckets) {
693  this->BaseT::initEmpty();
694  return;
695  }
696 
697  operator delete(Buckets);
698  init(NewNumBuckets);
699  }
700 
701 private:
702  unsigned getNumEntries() const {
703  return NumEntries;
704  }
705  void setNumEntries(unsigned Num) {
706  NumEntries = Num;
707  }
708 
709  unsigned getNumTombstones() const {
710  return NumTombstones;
711  }
712  void setNumTombstones(unsigned Num) {
713  NumTombstones = Num;
714  }
715 
716  BucketT *getBuckets() const {
717  return Buckets;
718  }
719 
720  unsigned getNumBuckets() const {
721  return NumBuckets;
722  }
723 
724  bool allocateBuckets(unsigned Num) {
725  NumBuckets = Num;
726  if (NumBuckets == 0) {
727  Buckets = nullptr;
728  return false;
729  }
730 
731  Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
732  return true;
733  }
734 };
735 
736 template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
737  typename KeyInfoT = DenseMapInfo<KeyT>,
738  typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
739 class SmallDenseMap
740  : public DenseMapBase<
741  SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
742  ValueT, KeyInfoT, BucketT> {
743  // Lift some types from the dependent base class into this class for
744  // simplicity of referring to them.
745  typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
746  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
747 
748  unsigned Small : 1;
749  unsigned NumEntries : 31;
750  unsigned NumTombstones;
751 
752  struct LargeRep {
753  BucketT *Buckets;
754  unsigned NumBuckets;
755  };
756 
757  /// A "union" of an inline bucket array and the struct representing
758  /// a large bucket. This union will be discriminated by the 'Small' bit.
759  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
760 
761 public:
762  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
763  init(NumInitBuckets);
764  }
765 
766  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
767  init(0);
768  copyFrom(other);
769  }
770 
771  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
772  init(0);
773  swap(other);
774  }
775 
776  template<typename InputIt>
777  SmallDenseMap(const InputIt &I, const InputIt &E) {
778  init(NextPowerOf2(std::distance(I, E)));
779  this->insert(I, E);
780  }
781 
782  ~SmallDenseMap() {
783  this->destroyAll();
784  deallocateBuckets();
785  }
786 
787  void swap(SmallDenseMap& RHS) {
788  unsigned TmpNumEntries = RHS.NumEntries;
789  RHS.NumEntries = NumEntries;
790  NumEntries = TmpNumEntries;
791  std::swap(NumTombstones, RHS.NumTombstones);
792 
793  const KeyT EmptyKey = this->getEmptyKey();
794  const KeyT TombstoneKey = this->getTombstoneKey();
795  if (Small && RHS.Small) {
796  // If we're swapping inline bucket arrays, we have to cope with some of
797  // the tricky bits of DenseMap's storage system: the buckets are not
798  // fully initialized. Thus we swap every key, but we may have
799  // a one-directional move of the value.
800  for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
801  BucketT *LHSB = &getInlineBuckets()[i],
802  *RHSB = &RHS.getInlineBuckets()[i];
803  bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
804  !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
805  bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
806  !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
807  if (hasLHSValue && hasRHSValue) {
808  // Swap together if we can...
809  std::swap(*LHSB, *RHSB);
810  continue;
811  }
812  // Swap separately and handle any asymmetry.
813  std::swap(LHSB->getFirst(), RHSB->getFirst());
814  if (hasLHSValue) {
815  ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
816  LHSB->getSecond().~ValueT();
817  } else if (hasRHSValue) {
818  ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
819  RHSB->getSecond().~ValueT();
820  }
821  }
822  return;
823  }
824  if (!Small && !RHS.Small) {
825  std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
826  std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
827  return;
828  }
829 
830  SmallDenseMap &SmallSide = Small ? *this : RHS;
831  SmallDenseMap &LargeSide = Small ? RHS : *this;
832 
833  // First stash the large side's rep and move the small side across.
834  LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
835  LargeSide.getLargeRep()->~LargeRep();
836  LargeSide.Small = true;
837  // This is similar to the standard move-from-old-buckets, but the bucket
838  // count hasn't actually rotated in this case. So we have to carefully
839  // move construct the keys and values into their new locations, but there
840  // is no need to re-hash things.
841  for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
842  BucketT *NewB = &LargeSide.getInlineBuckets()[i],
843  *OldB = &SmallSide.getInlineBuckets()[i];
844  ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
845  OldB->getFirst().~KeyT();
846  if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
847  !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
848  ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
849  OldB->getSecond().~ValueT();
850  }
851  }
852 
853  // The hard part of moving the small buckets across is done, just move
854  // the TmpRep into its new home.
855  SmallSide.Small = false;
856  new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
857  }
858 
859  SmallDenseMap& operator=(const SmallDenseMap& other) {
860  if (&other != this)
861  copyFrom(other);
862  return *this;
863  }
864 
865  SmallDenseMap& operator=(SmallDenseMap &&other) {
866  this->destroyAll();
867  deallocateBuckets();
868  init(0);
869  swap(other);
870  return *this;
871  }
872 
873  void copyFrom(const SmallDenseMap& other) {
874  this->destroyAll();
875  deallocateBuckets();
876  Small = true;
877  if (other.getNumBuckets() > InlineBuckets) {
878  Small = false;
879  new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
880  }
881  this->BaseT::copyFrom(other);
882  }
883 
884  void init(unsigned InitBuckets) {
885  Small = true;
886  if (InitBuckets > InlineBuckets) {
887  Small = false;
888  new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
889  }
890  this->BaseT::initEmpty();
891  }
892 
893  void grow(unsigned AtLeast) {
894  if (AtLeast >= InlineBuckets)
895  AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
896 
897  if (Small) {
898  if (AtLeast < InlineBuckets)
899  return; // Nothing to do.
900 
901  // First move the inline buckets into a temporary storage.
902  AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
903  BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
904  BucketT *TmpEnd = TmpBegin;
905 
906  // Loop over the buckets, moving non-empty, non-tombstones into the
907  // temporary storage. Have the loop move the TmpEnd forward as it goes.
908  const KeyT EmptyKey = this->getEmptyKey();
909  const KeyT TombstoneKey = this->getTombstoneKey();
910  for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
911  if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
912  !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
913  assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
914  "Too many inline buckets!");
915  ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
916  ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
917  ++TmpEnd;
918  P->getSecond().~ValueT();
919  }
920  P->getFirst().~KeyT();
921  }
922 
923  // Now make this map use the large rep, and move all the entries back
924  // into it.
925  Small = false;
926  new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
927  this->moveFromOldBuckets(TmpBegin, TmpEnd);
928  return;
929  }
930 
931  LargeRep OldRep = std::move(*getLargeRep());
932  getLargeRep()->~LargeRep();
933  if (AtLeast <= InlineBuckets) {
934  Small = true;
935  } else {
936  new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
937  }
938 
939  this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
940 
941  // Free the old table.
942  operator delete(OldRep.Buckets);
943  }
944 
945  void shrink_and_clear() {
946  unsigned OldSize = this->size();
947  this->destroyAll();
948 
949  // Reduce the number of buckets.
950  unsigned NewNumBuckets = 0;
951  if (OldSize) {
952  NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
953  if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
954  NewNumBuckets = 64;
955  }
956  if ((Small && NewNumBuckets <= InlineBuckets) ||
957  (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
958  this->BaseT::initEmpty();
959  return;
960  }
961 
962  deallocateBuckets();
963  init(NewNumBuckets);
964  }
965 
966 private:
967  unsigned getNumEntries() const {
968  return NumEntries;
969  }
970  void setNumEntries(unsigned Num) {
971  assert(Num < INT_MAX && "Cannot support more than INT_MAX entries");
972  NumEntries = Num;
973  }
974 
975  unsigned getNumTombstones() const {
976  return NumTombstones;
977  }
978  void setNumTombstones(unsigned Num) {
979  NumTombstones = Num;
980  }
981 
982  const BucketT *getInlineBuckets() const {
983  assert(Small);
984  // Note that this cast does not violate aliasing rules as we assert that
985  // the memory's dynamic type is the small, inline bucket buffer, and the
986  // 'storage.buffer' static type is 'char *'.
987  return reinterpret_cast<const BucketT *>(storage.buffer);
988  }
989  BucketT *getInlineBuckets() {
990  return const_cast<BucketT *>(
991  const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
992  }
993  const LargeRep *getLargeRep() const {
994  assert(!Small);
995  // Note, same rule about aliasing as with getInlineBuckets.
996  return reinterpret_cast<const LargeRep *>(storage.buffer);
997  }
998  LargeRep *getLargeRep() {
999  return const_cast<LargeRep *>(
1000  const_cast<const SmallDenseMap *>(this)->getLargeRep());
1001  }
1002 
1003  const BucketT *getBuckets() const {
1004  return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1005  }
1006  BucketT *getBuckets() {
1007  return const_cast<BucketT *>(
1008  const_cast<const SmallDenseMap *>(this)->getBuckets());
1009  }
1010  unsigned getNumBuckets() const {
1011  return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1012  }
1013 
1014  void deallocateBuckets() {
1015  if (Small)
1016  return;
1017 
1018  operator delete(getLargeRep()->Buckets);
1019  getLargeRep()->~LargeRep();
1020  }
1021 
1022  LargeRep allocateBuckets(unsigned Num) {
1023  assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
1024  LargeRep Rep = {
1025  static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
1026  };
1027  return Rep;
1028  }
1029 };
1030 
1031 template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1032  bool IsConst>
1033 class DenseMapIterator : DebugEpochBase::HandleBase {
1034  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true> ConstIterator;
1035  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1036  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1037 
1038 public:
1039  typedef ptrdiff_t difference_type;
1040  typedef typename std::conditional<IsConst, const Bucket, Bucket>::type
1041  value_type;
1042  typedef value_type *pointer;
1043  typedef value_type &reference;
1044  typedef std::forward_iterator_tag iterator_category;
1045 private:
1046  pointer Ptr, End;
1047 public:
1048  DenseMapIterator() : Ptr(nullptr), End(nullptr) {}
1049 
1050  DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
1051  bool NoAdvance = false)
1052  : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1053  assert(isHandleInSync() && "invalid construction!");
1054  if (!NoAdvance) AdvancePastEmptyBuckets();
1055  }
1056 
1057  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
1058  // for const iterator destinations so it doesn't end up as a user defined copy
1059  // constructor.
1060  template <bool IsConstSrc,
1061  typename = typename std::enable_if<!IsConstSrc && IsConst>::type>
1062  DenseMapIterator(
1063  const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
1064  : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1065 
1066  reference operator*() const {
1067  assert(isHandleInSync() && "invalid iterator access!");
1068  return *Ptr;
1069  }
1070  pointer operator->() const {
1071  assert(isHandleInSync() && "invalid iterator access!");
1072  return Ptr;
1073  }
1074 
1075  bool operator==(const ConstIterator &RHS) const {
1076  assert((!Ptr || isHandleInSync()) && "handle not in sync!");
1077  assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1078  assert(getEpochAddress() == RHS.getEpochAddress() &&
1079  "comparing incomparable iterators!");
1080  return Ptr == RHS.Ptr;
1081  }
1082  bool operator!=(const ConstIterator &RHS) const {
1083  assert((!Ptr || isHandleInSync()) && "handle not in sync!");
1084  assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1085  assert(getEpochAddress() == RHS.getEpochAddress() &&
1086  "comparing incomparable iterators!");
1087  return Ptr != RHS.Ptr;
1088  }
1089 
1090  inline DenseMapIterator& operator++() { // Preincrement
1091  assert(isHandleInSync() && "invalid iterator access!");
1092  ++Ptr;
1093  AdvancePastEmptyBuckets();
1094  return *this;
1095  }
1096  DenseMapIterator operator++(int) { // Postincrement
1097  assert(isHandleInSync() && "invalid iterator access!");
1098  DenseMapIterator tmp = *this; ++*this; return tmp;
1099  }
1100 
1101 private:
1102  void AdvancePastEmptyBuckets() {
1103  const KeyT Empty = KeyInfoT::getEmptyKey();
1104  const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1105 
1106  while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1107  KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1108  ++Ptr;
1109  }
1110 };
1111 
1112 template<typename KeyT, typename ValueT, typename KeyInfoT>
1113 static inline size_t
1114 capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
1115  return X.getMemorySize();
1116 }
1117 
1118 } // end namespace llvm
1119 
1120 #endif