WPILibC++ 2023.4.3-108-ge5452e3
DenseMap.h
Go to the documentation of this file.
1//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the DenseMap class.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef WPIUTIL_WPI_DENSEMAP_H
15#define WPIUTIL_WPI_DENSEMAP_H
16
#include "wpi/DenseMapInfo.h"
#include "wpi/EpochTracker.h"
#include "wpi/AlignOf.h"
#include "wpi/Compiler.h"
#include "wpi/MathExtras.h"
#include "wpi/MemAlloc.h"
#include "wpi/ReverseIteration.h"
#include "wpi/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <new>
#include <type_traits>
#include <utility>
34
35namespace wpi {
36
37namespace detail {
38
39// We extend a pair to allow users to override the bucket type with their own
40// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
  using std::pair<KeyT, ValueT>::pair;

  // NOTE(review): the non-const getFirst() overload was missing from this
  // copy of the file (line lost in extraction); restored — the map code
  // below assigns through it (e.g. `P->getFirst() = EmptyKey`).
  KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
  const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
  ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
  const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
};
50
51} // end namespace detail
52
53template <typename KeyT, typename ValueT,
54 typename KeyInfoT = DenseMapInfo<KeyT>,
56 bool IsConst = false>
57class DenseMapIterator;
58
59template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
60 typename BucketT>
62 template <typename T>
63 using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
64
65public:
66 using size_type = unsigned;
67 using key_type = KeyT;
68 using mapped_type = ValueT;
69 using value_type = BucketT;
70
74
  /// Return an iterator to the first occupied bucket, honoring reverse
  /// iteration when shouldReverseIterate() is enabled for this key type.
  inline iterator begin() {
    // When the map is empty, avoid the overhead of advancing/retreating past
    // empty buckets.
    if (empty())
      return end();
    if (shouldReverseIterate<KeyT>())
      return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
    return makeIterator(getBuckets(), getBucketsEnd(), *this);
  }
  /// Past-the-end iterator; NoAdvance=true so construction does not scan
  /// forward over empty buckets.
  inline iterator end() {
    return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }
  /// Const overload of begin(); same reverse-iteration handling.
  inline const_iterator begin() const {
    if (empty())
      return end();
    if (shouldReverseIterate<KeyT>())
      return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
    return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
  }
  /// Const overload of end().
  inline const_iterator end() const {
    return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }
97
  /// True if the map holds no (key, value) pairs.
  LLVM_NODISCARD bool empty() const {
    return getNumEntries() == 0;
  }
  /// Number of (key, value) pairs currently in the map.
  unsigned size() const { return getNumEntries(); }
102
103 /// Grow the densemap so that it can contain at least \p NumEntries items
104 /// before resizing again.
105 void reserve(size_type NumEntries) {
106 auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
108 if (NumBuckets > getNumBuckets())
109 grow(NumBuckets);
110 }
111
112 void clear() {
114 if (getNumEntries() == 0 && getNumTombstones() == 0) return;
115
116 // If the capacity of the array is huge, and the # elements used is small,
117 // shrink the array.
118 if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
119 shrink_and_clear();
120 return;
121 }
122
123 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
124 if (std::is_trivially_destructible<ValueT>::value) {
125 // Use a simpler loop when values don't need destruction.
126 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
127 P->getFirst() = EmptyKey;
128 } else {
129 [[maybe_unused]] unsigned NumEntries = getNumEntries();
130 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
131 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
132 if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
133 P->getSecond().~ValueT();
134 --NumEntries;
135 }
136 P->getFirst() = EmptyKey;
137 }
138 }
139 assert(NumEntries == 0 && "Node count imbalance!");
140 }
141 setNumEntries(0);
142 setNumTombstones(0);
143 }
144
145 /// Return 1 if the specified key is in the map, 0 otherwise.
146 size_type count(const_arg_type_t<KeyT> Val) const {
147 const BucketT *TheBucket;
148 return LookupBucketFor(Val, TheBucket) ? 1 : 0;
149 }
150
  /// Return an iterator to the entry for \p Val, or end() if absent. The
  /// iterator is built with NoAdvance=true since it points at a known-live
  /// bucket; under reverse iteration the "end" anchor flips to getBuckets().
  iterator find(const_arg_type_t<KeyT> Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeIterator(TheBucket,
                          shouldReverseIterate<KeyT>() ? getBuckets()
                                                       : getBucketsEnd(),
                          *this, true);
    return end();
  }
  /// Const overload of find().
  const_iterator find(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeConstIterator(TheBucket,
                               shouldReverseIterate<KeyT>() ? getBuckets()
                                                            : getBucketsEnd(),
                               *this, true);
    return end();
  }
169
  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template<class LookupKeyT>
  iterator find_as(const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeIterator(TheBucket,
                          shouldReverseIterate<KeyT>() ? getBuckets()
                                                       : getBucketsEnd(),
                          *this, true);
    return end();
  }
  /// Const overload of find_as().
  template<class LookupKeyT>
  const_iterator find_as(const LookupKeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeConstIterator(TheBucket,
                               shouldReverseIterate<KeyT>() ? getBuckets()
                                                            : getBucketsEnd(),
                               *this, true);
    return end();
  }
195
196 /// lookup - Return the entry for the specified key, or a default
197 /// constructed value if no such entry exists.
198 ValueT lookup(const_arg_type_t<KeyT> Val) const {
199 const BucketT *TheBucket;
200 if (LookupBucketFor(Val, TheBucket))
201 return TheBucket->getSecond();
202 return ValueT();
203 }
204
  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    return try_emplace(KV.first, KV.second);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value. Rvalue overload: key and value are moved on successful insertion.
  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
    return try_emplace(std::move(KV.first), std::move(KV.second));
  }
218
  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved. Returns {iterator to entry, true if inserted}.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket =
        InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved. Copying-key overload of the above.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }
267
  /// Alternate version of insert() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <typename LookupKeyT>
  std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
                                      const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element. The cheap lookup key is used for
    // probing; the real key/value are moved into the chosen bucket.
    TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
                                           std::move(KV.second), Val);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }
295
296 /// insert - Range insertion of pairs.
297 template<typename InputIt>
298 void insert(InputIt I, InputIt E) {
299 for (; I != E; ++I)
300 insert(*I);
301 }
302
  /// Erase the entry for \p Val if present; returns true if something was
  /// erased. The bucket becomes a tombstone (not empty) so probe chains that
  /// pass through it remain intact.
  bool erase(const KeyT &Val) {
    BucketT *TheBucket;
    if (!LookupBucketFor(Val, TheBucket))
      return false; // not in map.

    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }
  /// Erase the entry the iterator points at (must be a valid, live entry).
  void erase(iterator I) {
    BucketT *TheBucket = &*I;
    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }
321
  /// Return the bucket for \p Key, default-constructing a value for it first
  /// if the key is not already present.
  value_type& FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, Key);
  }

  /// Subscript: inserts a default-constructed value if \p Key is absent, then
  /// returns a reference to the mapped value.
  ValueT &operator[](const KeyT &Key) {
    return FindAndConstruct(Key).second;
  }
333
335 BucketT *TheBucket;
336 if (LookupBucketFor(Key, TheBucket))
337 return *TheBucket;
338
339 return *InsertIntoBucket(TheBucket, std::move(Key));
340 }
341
342 ValueT &operator[](KeyT &&Key) {
343 return FindAndConstruct(std::move(Key)).second;
344 }
345
  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  /// value in the DenseMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array. In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
  const void *getPointerIntoBucketsArray() const { return getBuckets(); }
357
358protected:
359 DenseMapBase() = default;
360
  /// Destroy every key (and, for live entries, every value) in the bucket
  /// array without touching the bookkeeping counters or freeing storage.
  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      // Values only exist in buckets that are neither empty nor tombstones;
      // keys are constructed in every bucket and always destroyed.
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
        P->getSecond().~ValueT();
      P->getFirst().~KeyT();
    }
  }

  /// Reset counters and placement-construct the empty key into every bucket.
  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      ::new (&B->getFirst()) KeyT(EmptyKey);
  }
384
  /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without need to grow().
  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
    // Ensure that "NumEntries * 4 < NumBuckets * 3"
    if (NumEntries == 0)
      return 0;
    // +1 is required because of the strict inequality above.
    // For example, if NumEntries is 48, 48*4/3+1 == 65, and we return
    // NextPowerOf2(65) == 128.
    return static_cast<unsigned>(NextPowerOf2(NumEntries * 4 / 3 + 1));
  }
395
  /// Re-insert every live entry from the old bucket range into this (already
  /// allocated, freshly emptied) table, destroying the old keys/values as it
  /// goes. Used by grow(); does not free the old storage.
  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
        (void)FoundVal; // silence warning.
        assert(!FoundVal && "Key already in new map?");
        DestBucket->getFirst() = std::move(B->getFirst());
        ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
        incrementNumEntries();

        // Free the value.
        B->getSecond().~ValueT();
      }
      B->getFirst().~KeyT();
    }
  }
420
421 template <typename OtherBaseT>
424 assert(&other != this);
425 assert(getNumBuckets() == other.getNumBuckets());
426
427 setNumEntries(other.getNumEntries());
428 setNumTombstones(other.getNumTombstones());
429
430 if (std::is_trivially_copyable<KeyT>::value &&
431 std::is_trivially_copyable<ValueT>::value)
432 memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
433 getNumBuckets() * sizeof(BucketT));
434 else
435 for (size_t i = 0; i < getNumBuckets(); ++i) {
436 ::new (&getBuckets()[i].getFirst())
437 KeyT(other.getBuckets()[i].getFirst());
438 if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
439 !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
440 ::new (&getBuckets()[i].getSecond())
441 ValueT(other.getBuckets()[i].getSecond());
442 }
443 }
444
  /// Hash a real key through the traits class.
  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  /// Hash an alternate lookup key (see find_as/insert_as).
  template<typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  /// Sentinel key marking a never-used bucket.
  static const KeyT getEmptyKey() {
    static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
                  "Must pass the derived type to this template!");
    return KeyInfoT::getEmptyKey();
  }

  /// Sentinel key marking an erased bucket.
  static const KeyT getTombstoneKey() {
    return KeyInfoT::getTombstoneKey();
  }
463
464private:
  /// Build an iterator over [P, E). Under reverse iteration the bucket
  /// pointer is shifted by one (past-the-end maps to the array front) so the
  /// reversed walk visits the same element.
  iterator makeIterator(BucketT *P, BucketT *E,
                        DebugEpochBase &Epoch,
                        bool NoAdvance=false) {
    if (shouldReverseIterate<KeyT>()) {
      BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
      return iterator(B, E, Epoch, NoAdvance);
    }
    return iterator(P, E, Epoch, NoAdvance);
  }

  /// Const counterpart of makeIterator().
  const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
                                   const DebugEpochBase &Epoch,
                                   const bool NoAdvance=false) const {
    if (shouldReverseIterate<KeyT>()) {
      const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
      return const_iterator(B, E, Epoch, NoAdvance);
    }
    return const_iterator(P, E, Epoch, NoAdvance);
  }
484
  // The accessors below forward to the derived class (CRTP): the concrete map
  // implementation owns the entry/tombstone counts and the bucket storage.

  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }

  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }

  void incrementNumEntries() {
    setNumEntries(getNumEntries() + 1);
  }

  void decrementNumEntries() {
    setNumEntries(getNumEntries() - 1);
  }

  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }

  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }

  void incrementNumTombstones() {
    setNumTombstones(getNumTombstones() + 1);
  }

  void decrementNumTombstones() {
    setNumTombstones(getNumTombstones() - 1);
  }

  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }

  BucketT *getBuckets() {
    return static_cast<DerivedT *>(this)->getBuckets();
  }

  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }

  BucketT *getBucketsEnd() {
    return getBuckets() + getNumBuckets();
  }

  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  // grow() must leave room for at least AtLeast entries; the derived class
  // chooses the actual allocation policy.
  void grow(unsigned AtLeast) {
    static_cast<DerivedT *>(this)->grow(AtLeast);
  }

  void shrink_and_clear() {
    static_cast<DerivedT *>(this)->shrink_and_clear();
  }
544
  /// Place \p Key / \p Values into \p TheBucket (found by a prior failed
  /// lookup). InsertIntoBucketImpl may grow the table and so may return a
  /// different bucket than the one passed in.
  template <typename KeyArg, typename... ValueArgs>
  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
                            ValueArgs &&... Values) {
    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);

    // Key slot already holds a constructed empty/tombstone key: assign over
    // it. The value slot is raw memory: placement-new.
    TheBucket->getFirst() = std::forward<KeyArg>(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
    return TheBucket;
  }

  /// Same as InsertIntoBucket but probes with a cheap alternate \p Lookup key
  /// (see insert_as).
  template <typename LookupKeyT>
  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
                                      ValueT &&Value, LookupKeyT &Lookup) {
    TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);

    TheBucket->getFirst() = std::move(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
    return TheBucket;
  }
564
565 template <typename LookupKeyT>
566 BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
567 BucketT *TheBucket) {
569
570 // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
571 // the buckets are empty (meaning that many are filled with tombstones),
572 // grow the table.
573 //
574 // The later case is tricky. For example, if we had one empty bucket with
575 // tons of tombstones, failing lookups (e.g. for insertion) would have to
576 // probe almost the entire table until it found the empty bucket. If the
577 // table completely filled with tombstones, no lookup would ever succeed,
578 // causing infinite loops in lookup.
579 unsigned NewNumEntries = getNumEntries() + 1;
580 unsigned NumBuckets = getNumBuckets();
581 if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
582 this->grow(NumBuckets * 2);
583 LookupBucketFor(Lookup, TheBucket);
584 NumBuckets = getNumBuckets();
585 } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
586 NumBuckets/8)) {
587 this->grow(NumBuckets);
588 LookupBucketFor(Lookup, TheBucket);
589 }
590 assert(TheBucket);
591
592 // Only update the state after we've grown our bucket space appropriately
593 // so that when growing buckets we have self-consistent entry count.
594 incrementNumEntries();
595
596 // If we are writing over a tombstone, remember this.
597 const KeyT EmptyKey = getEmptyKey();
598 if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
599 decrementNumTombstones();
600
601 return TheBucket;
602 }
603
  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket. If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template<typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    const BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    // NumBuckets is a power of two, so masking is equivalent to modulo.
    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
    unsigned ProbeAmt = 1;
    while (true) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket? If so, return it.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the set.
      // Insert it and return the default value.
      if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it. If Val ends up not in the map, we
      // prefer to return it than something that would require more probing.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket; // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets-1);
    }
  }

  /// Non-const wrapper around the probing loop above.
  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)
                      ->LookupBucketFor(Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }
667
668public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// are not included.
  size_t getMemorySize() const {
    return getNumBuckets() * sizeof(BucketT);
  }
676};
677
678/// Equality comparison for DenseMap.
679///
680/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
681/// is also in RHS, and that no additional pairs are in RHS.
682/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
683/// complexity is linear, worst case is O(N^2) (if every hash collides).
684template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
685 typename BucketT>
689 if (LHS.size() != RHS.size())
690 return false;
691
692 for (auto &KV : LHS) {
693 auto I = RHS.find(KV.first);
694 if (I == RHS.end() || I->second != KV.second)
695 return false;
696 }
697
698 return true;
699}
700
701/// Inequality comparison for DenseMap.
702///
703/// Equivalent to !(LHS == RHS). See operator== for performance notes.
704template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
705 typename BucketT>
709 return !(LHS == RHS);
710}
711
712template <typename KeyT, typename ValueT,
713 typename KeyInfoT = DenseMapInfo<KeyT>,
715class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
716 KeyT, ValueT, KeyInfoT, BucketT> {
717 friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
718
719 // Lift some types from the dependent base class into this class for
720 // simplicity of referring to them.
722
723 BucketT *Buckets;
724 unsigned NumEntries;
725 unsigned NumTombstones;
726 unsigned NumBuckets;
727
728public:
729 /// Create a DenseMap with an optional \p InitialReserve that guarantee that
730 /// this number of elements can be inserted in the map without grow()
731 explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
732
733 DenseMap(const DenseMap &other) : BaseT() {
734 init(0);
735 copyFrom(other);
736 }
737
738 DenseMap(DenseMap &&other) : BaseT() {
739 init(0);
740 swap(other);
741 }
742
743 template<typename InputIt>
744 DenseMap(const InputIt &I, const InputIt &E) {
745 init(std::distance(I, E));
746 this->insert(I, E);
747 }
748
749 DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
750 init(Vals.size());
751 this->insert(Vals.begin(), Vals.end());
752 }
753
755 this->destroyAll();
756 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
757 }
758
759 void swap(DenseMap& RHS) {
760 this->incrementEpoch();
761 RHS.incrementEpoch();
762 std::swap(Buckets, RHS.Buckets);
763 std::swap(NumEntries, RHS.NumEntries);
764 std::swap(NumTombstones, RHS.NumTombstones);
765 std::swap(NumBuckets, RHS.NumBuckets);
766 }
767
768 DenseMap& operator=(const DenseMap& other) {
769 if (&other != this)
770 copyFrom(other);
771 return *this;
772 }
773
775 this->destroyAll();
776 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
777 init(0);
778 swap(other);
779 return *this;
780 }
781
782 void copyFrom(const DenseMap& other) {
783 this->destroyAll();
784 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
785 if (allocateBuckets(other.NumBuckets)) {
786 this->BaseT::copyFrom(other);
787 } else {
788 NumEntries = 0;
789 NumTombstones = 0;
790 }
791 }
792
793 void init(unsigned InitNumEntries) {
794 auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
795 if (allocateBuckets(InitBuckets)) {
796 this->BaseT::initEmpty();
797 } else {
798 NumEntries = 0;
799 NumTombstones = 0;
800 }
801 }
802
803 void grow(unsigned AtLeast) {
804 unsigned OldNumBuckets = NumBuckets;
805 BucketT *OldBuckets = Buckets;
806
807 allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
808 assert(Buckets);
809 if (!OldBuckets) {
810 this->BaseT::initEmpty();
811 return;
812 }
813
814 this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
815
816 // Free the old table.
817 deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
818 alignof(BucketT));
819 }
820
822 unsigned OldNumBuckets = NumBuckets;
823 unsigned OldNumEntries = NumEntries;
824 this->destroyAll();
825
826 // Reduce the number of buckets.
827 unsigned NewNumBuckets = 0;
828 if (OldNumEntries)
829 NewNumBuckets = (std::max)(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
830 if (NewNumBuckets == NumBuckets) {
831 this->BaseT::initEmpty();
832 return;
833 }
834
835 deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
836 alignof(BucketT));
837 init(NewNumBuckets);
838 }
839
840private:
841 unsigned getNumEntries() const {
842 return NumEntries;
843 }
844
845 void setNumEntries(unsigned Num) {
846 NumEntries = Num;
847 }
848
849 unsigned getNumTombstones() const {
850 return NumTombstones;
851 }
852
853 void setNumTombstones(unsigned Num) {
854 NumTombstones = Num;
855 }
856
857 BucketT *getBuckets() const {
858 return Buckets;
859 }
860
861 unsigned getNumBuckets() const {
862 return NumBuckets;
863 }
864
865 bool allocateBuckets(unsigned Num) {
866 NumBuckets = Num;
867 if (NumBuckets == 0) {
868 Buckets = nullptr;
869 return false;
870 }
871
872 Buckets = static_cast<BucketT *>(
873 allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
874 return true;
875 }
876};
877
878template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
879 typename KeyInfoT = DenseMapInfo<KeyT>,
882 : public DenseMapBase<
883 SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
884 ValueT, KeyInfoT, BucketT> {
885 friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
886
887 // Lift some types from the dependent base class into this class for
888 // simplicity of referring to them.
890
891 static_assert(isPowerOf2_64(InlineBuckets),
892 "InlineBuckets must be a power of 2.");
893
894 unsigned Small : 1;
895 unsigned NumEntries : 31;
896 unsigned NumTombstones;
897
898 struct LargeRep {
899 BucketT *Buckets;
900 unsigned NumBuckets;
901 };
902
903 /// A "union" of an inline bucket array and the struct representing
904 /// a large bucket. This union will be discriminated by the 'Small' bit.
906
907public:
908 explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
909 init(NumInitBuckets);
910 }
911
913 init(0);
914 copyFrom(other);
915 }
916
918 init(0);
919 swap(other);
920 }
921
922 template<typename InputIt>
923 SmallDenseMap(const InputIt &I, const InputIt &E) {
924 init(NextPowerOf2(std::distance(I, E)));
925 this->insert(I, E);
926 }
927
928 SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
929 : SmallDenseMap(Vals.begin(), Vals.end()) {}
930
932 this->destroyAll();
933 deallocateBuckets();
934 }
935
  /// Swap contents with \p RHS, handling all three storage combinations:
  /// inline/inline (element-wise swap), large/large (pointer swap), and the
  /// mixed case (move inline buckets across, then transplant the LargeRep).
  void swap(SmallDenseMap& RHS) {
    // NumEntries is a 31-bit bitfield, so std::swap cannot bind to it;
    // swap manually.
    unsigned TmpNumEntries = RHS.NumEntries;
    RHS.NumEntries = NumEntries;
    NumEntries = TmpNumEntries;
    std::swap(NumTombstones, RHS.NumTombstones);

    const KeyT EmptyKey = this->getEmptyKey();
    const KeyT TombstoneKey = this->getTombstoneKey();
    if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: the buckets are not
      // fully initialized. Thus we swap every key, but we may have
      // a one-directional move of the value.
      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
        BucketT *LHSB = &getInlineBuckets()[i],
                *RHSB = &RHS.getInlineBuckets()[i];
        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
        if (hasLHSValue && hasRHSValue) {
          // Swap together if we can...
          std::swap(*LHSB, *RHSB);
          continue;
        }
        // Swap separately and handle any asymmetry.
        std::swap(LHSB->getFirst(), RHSB->getFirst());
        if (hasLHSValue) {
          ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
          LHSB->getSecond().~ValueT();
        } else if (hasRHSValue) {
          ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
          RHSB->getSecond().~ValueT();
        }
      }
      return;
    }
    if (!Small && !RHS.Small) {
      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
      return;
    }

    SmallDenseMap &SmallSide = Small ? *this : RHS;
    SmallDenseMap &LargeSide = Small ? RHS : *this;

    // First stash the large side's rep and move the small side across.
    LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
    LargeSide.getLargeRep()->~LargeRep();
    LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually rotated in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
              *OldB = &SmallSide.getInlineBuckets()[i];
      ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
      OldB->getFirst().~KeyT();
      if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
        ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
        OldB->getSecond().~ValueT();
      }
    }

    // The hard part of moving the small buckets across is done, just move
    // the TmpRep into its new home.
    SmallSide.Small = false;
    new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
  }
1007
1009 if (&other != this)
1010 copyFrom(other);
1011 return *this;
1012 }
1013
1015 this->destroyAll();
1016 deallocateBuckets();
1017 init(0);
1018 swap(other);
1019 return *this;
1020 }
1021
1022 void copyFrom(const SmallDenseMap& other) {
1023 this->destroyAll();
1024 deallocateBuckets();
1025 Small = true;
1026 if (other.getNumBuckets() > InlineBuckets) {
1027 Small = false;
1028 new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
1029 }
1030 this->BaseT::copyFrom(other);
1031 }
1032
1033 void init(unsigned InitBuckets) {
1034 Small = true;
1035 if (InitBuckets > InlineBuckets) {
1036 Small = false;
1037 new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
1038 }
1039 this->BaseT::initEmpty();
1040 }
1041
1042 void grow(unsigned AtLeast) {
1043 if (AtLeast > InlineBuckets)
1044 AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
1045
1046 if (Small) {
1047 // First move the inline buckets into a temporary storage.
1049 BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
1050 BucketT *TmpEnd = TmpBegin;
1051
1052 // Loop over the buckets, moving non-empty, non-tombstones into the
1053 // temporary storage. Have the loop move the TmpEnd forward as it goes.
1054 const KeyT EmptyKey = this->getEmptyKey();
1055 const KeyT TombstoneKey = this->getTombstoneKey();
1056 for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
1057 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
1058 !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
1059 assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
1060 "Too many inline buckets!");
1061 ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
1062 ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
1063 ++TmpEnd;
1064 P->getSecond().~ValueT();
1065 }
1066 P->getFirst().~KeyT();
1067 }
1068
1069 // AtLeast == InlineBuckets can happen if there are many tombstones,
1070 // and grow() is used to remove them. Usually we always switch to the
1071 // large rep here.
1072 if (AtLeast > InlineBuckets) {
1073 Small = false;
1074 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1075 }
1076 this->moveFromOldBuckets(TmpBegin, TmpEnd);
1077 return;
1078 }
1079
1080 LargeRep OldRep = std::move(*getLargeRep());
1081 getLargeRep()->~LargeRep();
1082 if (AtLeast <= InlineBuckets) {
1083 Small = true;
1084 } else {
1085 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1086 }
1087
1088 this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
1089
1090 // Free the old table.
1091 deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
1092 alignof(BucketT));
1093 }
1094
1096 unsigned OldSize = this->size();
1097 this->destroyAll();
1098
1099 // Reduce the number of buckets.
1100 unsigned NewNumBuckets = 0;
1101 if (OldSize) {
1102 NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
1103 if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
1104 NewNumBuckets = 64;
1105 }
1106 if ((Small && NewNumBuckets <= InlineBuckets) ||
1107 (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
1108 this->BaseT::initEmpty();
1109 return;
1110 }
1111
1112 deallocateBuckets();
1113 init(NewNumBuckets);
1114 }
1115
1116private:
1117 unsigned getNumEntries() const {
1118 return NumEntries;
1119 }
1120
1121 void setNumEntries(unsigned Num) {
1122 // NumEntries is hardcoded to be 31 bits wide.
1123 assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
1124 NumEntries = Num;
1125 }
1126
1127 unsigned getNumTombstones() const {
1128 return NumTombstones;
1129 }
1130
1131 void setNumTombstones(unsigned Num) {
1132 NumTombstones = Num;
1133 }
1134
1135 const BucketT *getInlineBuckets() const {
1136 assert(Small);
1137 // Note that this cast does not violate aliasing rules as we assert that
1138 // the memory's dynamic type is the small, inline bucket buffer, and the
1139 // 'storage' is a POD containing a char buffer.
1140 return reinterpret_cast<const BucketT *>(&storage);
1141 }
1142
1143 BucketT *getInlineBuckets() {
1144 return const_cast<BucketT *>(
1145 const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
1146 }
1147
1148 const LargeRep *getLargeRep() const {
1149 assert(!Small);
1150 // Note, same rule about aliasing as with getInlineBuckets.
1151 return reinterpret_cast<const LargeRep *>(&storage);
1152 }
1153
1154 LargeRep *getLargeRep() {
1155 return const_cast<LargeRep *>(
1156 const_cast<const SmallDenseMap *>(this)->getLargeRep());
1157 }
1158
1159 const BucketT *getBuckets() const {
1160 return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1161 }
1162
1163 BucketT *getBuckets() {
1164 return const_cast<BucketT *>(
1165 const_cast<const SmallDenseMap *>(this)->getBuckets());
1166 }
1167
1168 unsigned getNumBuckets() const {
1169 return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1170 }
1171
1172 void deallocateBuckets() {
1173 if (Small)
1174 return;
1175
1176 deallocate_buffer(getLargeRep()->Buckets,
1177 sizeof(BucketT) * getLargeRep()->NumBuckets,
1178 alignof(BucketT));
1179 getLargeRep()->~LargeRep();
1180 }
1181
1182 LargeRep allocateBuckets(unsigned Num) {
1183 assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
1184 LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
1185 sizeof(BucketT) * Num, alignof(BucketT))),
1186 Num};
1187 return Rep;
1188 }
1189};
1190
1191template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1192 bool IsConst>
1194 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1195 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1196
1197public:
1198 using difference_type = ptrdiff_t;
1203 using iterator_category = std::forward_iterator_tag;
1204
1205private:
1206 pointer Ptr = nullptr;
1207 pointer End = nullptr;
1208
1209public:
1210 DenseMapIterator() = default;
1211
1213 bool NoAdvance = false)
1214 : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1215 assert(isHandleInSync() && "invalid construction!");
1216
1217 if (NoAdvance) return;
1218 if (shouldReverseIterate<KeyT>()) {
1219 RetreatPastEmptyBuckets();
1220 return;
1221 }
1222 AdvancePastEmptyBuckets();
1223 }
1224
1225 // Converting ctor from non-const iterators to const iterators. SFINAE'd out
1226 // for const iterator destinations so it doesn't end up as a user defined copy
1227 // constructor.
1228 template <bool IsConstSrc,
1229 typename = std::enable_if_t<!IsConstSrc && IsConst>>
1232 : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1233
1235 assert(isHandleInSync() && "invalid iterator access!");
1236 assert(Ptr != End && "dereferencing end() iterator");
1237 if (shouldReverseIterate<KeyT>())
1238 return Ptr[-1];
1239 return *Ptr;
1240 }
1242 assert(isHandleInSync() && "invalid iterator access!");
1243 assert(Ptr != End && "dereferencing end() iterator");
1244 if (shouldReverseIterate<KeyT>())
1245 return &(Ptr[-1]);
1246 return Ptr;
1247 }
1248
1249 friend bool operator==(const DenseMapIterator &LHS,
1250 const DenseMapIterator &RHS) {
1251 assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
1252 assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1253 assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
1254 "comparing incomparable iterators!");
1255 return LHS.Ptr == RHS.Ptr;
1256 }
1257
1258 friend bool operator!=(const DenseMapIterator &LHS,
1259 const DenseMapIterator &RHS) {
1260 return !(LHS == RHS);
1261 }
1262
1263 inline DenseMapIterator& operator++() { // Preincrement
1264 assert(isHandleInSync() && "invalid iterator access!");
1265 assert(Ptr != End && "incrementing end() iterator");
1266 if (shouldReverseIterate<KeyT>()) {
1267 --Ptr;
1268 RetreatPastEmptyBuckets();
1269 return *this;
1270 }
1271 ++Ptr;
1272 AdvancePastEmptyBuckets();
1273 return *this;
1274 }
1275 DenseMapIterator operator++(int) { // Postincrement
1276 assert(isHandleInSync() && "invalid iterator access!");
1277 DenseMapIterator tmp = *this; ++*this; return tmp;
1278 }
1279
1280private:
1281 void AdvancePastEmptyBuckets() {
1282 assert(Ptr <= End);
1283 const KeyT Empty = KeyInfoT::getEmptyKey();
1284 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1285
1286 while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1287 KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1288 ++Ptr;
1289 }
1290
1291 void RetreatPastEmptyBuckets() {
1292 assert(Ptr >= End);
1293 const KeyT Empty = KeyInfoT::getEmptyKey();
1294 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1295
1296 while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
1297 KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
1298 --Ptr;
1299 }
1300};
1301
1302template <typename KeyT, typename ValueT, typename KeyInfoT>
1304 return X.getMemorySize();
1305}
1306
1307} // end namespace wpi
1308
1309#endif // WPIUTIL_WPI_DENSEMAP_H
#define LLVM_UNLIKELY(EXPR)
Definition: Compiler.h:250
#define LLVM_NODISCARD
LLVM_NODISCARD - Warn if a type or return value is discarded.
Definition: Compiler.h:177
#define LLVM_LIKELY(EXPR)
Definition: Compiler.h:249
This file defines DenseMapInfo traits for DenseMap.
This file defines the DebugEpochBase and DebugEpochBase::HandleBase classes.
This file defines counterparts of C library allocation functions defined in the namespace 'std'.
A base class for iterator classes ("handles") that wish to poll for iterator invalidating modificatio...
Definition: EpochTracker.h:57
A base class for data structure classes wishing to make iterators ("handles") pointing into themselve...
Definition: EpochTracker.h:35
void incrementEpoch()
Calling incrementEpoch invalidates all handles pointing into the calling instance.
Definition: EpochTracker.h:43
DebugEpochBase()
Definition: EpochTracker.h:39
Definition: DenseMap.h:61
void copyFrom(const DenseMapBase< OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT > &other)
Definition: DenseMap.h:422
bool isPointerIntoBucketsArray(const void *Ptr) const
isPointerIntoBucketsArray - Return true if the specified pointer points somewhere into the DenseMap's...
Definition: DenseMap.h:349
LLVM_NODISCARD bool empty() const
Definition: DenseMap.h:98
const_iterator find_as(const LookupKeyT &Val) const
Definition: DenseMap.h:186
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
Definition: DenseMap.h:71
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&... Args)
Definition: DenseMap.h:223
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(std::pair< KeyT, ValueT > &&KV)
Definition: DenseMap.h:215
static const KeyT getEmptyKey()
Definition: DenseMap.h:454
bool erase(const KeyT &Val)
Definition: DenseMap.h:303
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:146
ValueT mapped_type
Definition: DenseMap.h:68
void erase(iterator I)
Definition: DenseMap.h:314
void initEmpty()
Definition: DenseMap.h:374
unsigned size() const
Definition: DenseMap.h:101
static unsigned getHashValue(const KeyT &Val)
Definition: DenseMap.h:445
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Definition: DenseMap.h:105
void insert(InputIt I, InputIt E)
insert - Range insertion of pairs.
Definition: DenseMap.h:298
BucketT value_type
Definition: DenseMap.h:69
const void * getPointerIntoBucketsArray() const
getPointerIntoBucketsArray() - Return an opaque pointer into the buckets array.
Definition: DenseMap.h:356
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:208
const_iterator end() const
Definition: DenseMap.h:94
KeyT key_type
Definition: DenseMap.h:67
value_type & FindAndConstruct(const KeyT &Key)
Definition: DenseMap.h:322
void clear()
Definition: DenseMap.h:112
void destroyAll()
Definition: DenseMap.h:361
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
Definition: DenseMap.h:73
iterator begin()
Definition: DenseMap.h:75
ValueT & operator[](const KeyT &Key)
Definition: DenseMap.h:330
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:198
static unsigned getHashValue(const LookupKeyT &Val)
Definition: DenseMap.h:450
DenseMapBase()=default
unsigned size_type
Definition: DenseMap.h:66
std::pair< iterator, bool > insert_as(std::pair< KeyT, ValueT > &&KV, const LookupKeyT &Val)
Alternate version of insert() which allows a different, and possibly less expensive,...
Definition: DenseMap.h:274
unsigned getMinBucketToReserveForEntries(unsigned NumEntries)
Returns the number of buckets to allocate to ensure that the DenseMap can accommodate NumEntries with...
Definition: DenseMap.h:387
void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd)
Definition: DenseMap.h:396
const_iterator find(const_arg_type_t< KeyT > Val) const
Definition: DenseMap.h:160
static const KeyT getTombstoneKey()
Definition: DenseMap.h:460
ValueT & operator[](KeyT &&Key)
Definition: DenseMap.h:342
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:151
value_type & FindAndConstruct(KeyT &&Key)
Definition: DenseMap.h:334
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
Definition: DenseMap.h:176
size_t getMemorySize() const
Return the approximate size (in bytes) of the actual map.
Definition: DenseMap.h:673
const_iterator begin() const
Definition: DenseMap.h:87
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&... Args)
Definition: DenseMap.h:248
Definition: DenseMap.h:716
void init(unsigned InitNumEntries)
Definition: DenseMap.h:793
void copyFrom(const DenseMap &other)
Definition: DenseMap.h:782
DenseMap & operator=(DenseMap &&other)
Definition: DenseMap.h:774
DenseMap & operator=(const DenseMap &other)
Definition: DenseMap.h:768
DenseMap(const DenseMap &other)
Definition: DenseMap.h:733
DenseMap(std::initializer_list< typename BaseT::value_type > Vals)
Definition: DenseMap.h:749
void grow(unsigned AtLeast)
Definition: DenseMap.h:803
void shrink_and_clear()
Definition: DenseMap.h:821
DenseMap(const InputIt &I, const InputIt &E)
Definition: DenseMap.h:744
void swap(DenseMap &RHS)
Definition: DenseMap.h:759
DenseMap(DenseMap &&other)
Definition: DenseMap.h:738
DenseMap(unsigned InitialReserve=0)
Create a DenseMap with an optional InitialReserve that guarantee that this number of elements can be ...
Definition: DenseMap.h:731
~DenseMap()
Definition: DenseMap.h:754
Definition: DenseMap.h:1193
DenseMapIterator & operator++()
Definition: DenseMap.h:1263
friend bool operator!=(const DenseMapIterator &LHS, const DenseMapIterator &RHS)
Definition: DenseMap.h:1258
DenseMapIterator(const DenseMapIterator< KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc > &I)
Definition: DenseMap.h:1230
typename std::conditional< IsConst, const Bucket, Bucket >::type value_type
Definition: DenseMap.h:1200
reference operator*() const
Definition: DenseMap.h:1234
pointer operator->() const
Definition: DenseMap.h:1241
DenseMapIterator()=default
DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch, bool NoAdvance=false)
Definition: DenseMap.h:1212
std::forward_iterator_tag iterator_category
Definition: DenseMap.h:1203
ptrdiff_t difference_type
Definition: DenseMap.h:1198
value_type * pointer
Definition: DenseMap.h:1201
DenseMapIterator operator++(int)
Definition: DenseMap.h:1275
friend bool operator==(const DenseMapIterator &LHS, const DenseMapIterator &RHS)
Definition: DenseMap.h:1249
value_type & reference
Definition: DenseMap.h:1202
Definition: DenseMap.h:884
~SmallDenseMap()
Definition: DenseMap.h:931
SmallDenseMap(unsigned NumInitBuckets=0)
Definition: DenseMap.h:908
void swap(SmallDenseMap &RHS)
Definition: DenseMap.h:936
void copyFrom(const SmallDenseMap &other)
Definition: DenseMap.h:1022
void init(unsigned InitBuckets)
Definition: DenseMap.h:1033
SmallDenseMap & operator=(SmallDenseMap &&other)
Definition: DenseMap.h:1014
SmallDenseMap(const SmallDenseMap &other)
Definition: DenseMap.h:912
SmallDenseMap(SmallDenseMap &&other)
Definition: DenseMap.h:917
void shrink_and_clear()
Definition: DenseMap.h:1095
void grow(unsigned AtLeast)
Definition: DenseMap.h:1042
SmallDenseMap(const InputIt &I, const InputIt &E)
Definition: DenseMap.h:923
SmallDenseMap(std::initializer_list< typename BaseT::value_type > Vals)
Definition: DenseMap.h:928
SmallDenseMap & operator=(const SmallDenseMap &other)
Definition: DenseMap.h:1008
type
Definition: core.h:575
constexpr common_t< T1, T2 > max(const T1 x, const T2 y) noexcept
Compile-time pairwise maximum function.
Definition: max.hpp:35
EIGEN_CONSTEXPR Index first(const T &x) EIGEN_NOEXCEPT
Definition: IndexedViewHelper.h:81
EIGEN_STRONG_INLINE void swap(T &a, T &b)
Definition: Meta.h:766
@ Small
Definition: GeneralProduct.h:18
Definition: format-inl.h:32
const int Empty
Definition: Eigen_Colamd.h:116
Definition: BFloat16.h:88
void swap(wpi::SmallPtrSet< T, N > &LHS, wpi::SmallPtrSet< T, N > &RHS)
Implement std::swap in terms of SmallPtrSet swap.
Definition: SmallPtrSet.h:512
static constexpr const charge::coulomb_t e(1.6021766208e-19)
elementary charge.
Definition: AprilTagFieldLayout.h:18
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition: MathExtras.h:582
void deallocate_buffer(void *Ptr, size_t Size, size_t Alignment)
Deallocate a buffer of memory with the given size and alignment.
size_t capacity_in_bytes(const DenseMap< KeyT, ValueT, KeyInfoT > &X)
Definition: DenseMap.h:1303
uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition: MathExtras.h:656
bool operator==(const DenseMapBase< DerivedT, KeyT, ValueT, KeyInfoT, BucketT > &LHS, const DenseMapBase< DerivedT, KeyT, ValueT, KeyInfoT, BucketT > &RHS)
Equality comparison for DenseMap.
Definition: DenseMap.h:686
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:469
LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void * allocate_buffer(size_t Size, size_t Alignment)
Allocate a buffer of memory with the given size and alignment.
bool operator!=(const DenseMapBase< DerivedT, KeyT, ValueT, KeyInfoT, BucketT > &LHS, const DenseMapBase< DerivedT, KeyT, ValueT, KeyInfoT, BucketT > &RHS)
Inequality comparison for DenseMap.
Definition: DenseMap.h:706
A suitably aligned and sized character array member which can hold elements of any type.
Definition: AlignOf.h:27
const T & type
Definition: type_traits.h:64
Definition: DenseMap.h:42
KeyT & getFirst()
Definition: DenseMap.h:45
ValueT & getSecond()
Definition: DenseMap.h:47
const KeyT & getFirst() const
Definition: DenseMap.h:46
const ValueT & getSecond() const
Definition: DenseMap.h:48