//===- sanitizer_dense_map.h - Dense probed hash table ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is a fork of the llvm/ADT/DenseMap.h class with the following changes:
// * Use mmap to allocate.
// * No iterators.
// * Does not shrink.
//
//===----------------------------------------------------------------------===//
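//
// Example usage (an illustrative sketch; it assumes a DenseMapInfo
// specialization exists for the chosen key type, as sanitizer_dense_map_info.h
// provides for the unsigned integer types):
//
//   DenseMap<uptr, uptr> Sizes;
//   Sizes[0x1000] = 16;                   // default-constructs, then assigns
//   if (const auto *KV = Sizes.find(0x1000))
//     CHECK_EQ(KV->second, 16);           // KV points at the bucket pair
//   Sizes.erase(0x1000);                  // returns false if the key is absent
//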

#ifndef SANITIZER_DENSE_MAP_H
#define SANITIZER_DENSE_MAP_H

#include "sanitizer_common.h"
#include "sanitizer_dense_map_info.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_type_traits.h"

namespace __sanitizer {

template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
class DenseMapBase {
 public:
  using size_type = unsigned;
  using key_type = KeyT;
  using mapped_type = ValueT;
  using value_type = BucketT;

  WARN_UNUSED_RESULT bool empty() const { return getNumEntries() == 0; }
  unsigned size() const { return getNumEntries(); }

  /// Grow the densemap so that it can contain at least \p NumEntries items
  /// before resizing again.
  void reserve(size_type NumEntries) {
    auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
    if (NumBuckets > getNumBuckets())
      grow(NumBuckets);
  }

  void clear() {
    if (getNumEntries() == 0 && getNumTombstones() == 0)
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    if (__sanitizer::is_trivially_destructible<ValueT>::value) {
      // Use a simpler loop when values don't need destruction.
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
        P->getFirst() = EmptyKey;
    } else {
      unsigned NumEntries = getNumEntries();
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
          if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
            P->getSecond().~ValueT();
            --NumEntries;
          }
          P->getFirst() = EmptyKey;
        }
      }
      CHECK_EQ(NumEntries, 0);
    }
    setNumEntries(0);
    setNumTombstones(0);
  }

  /// Return 1 if the specified key is in the map, 0 otherwise.
  size_type count(const KeyT &Key) const {
    const BucketT *TheBucket;
    return LookupBucketFor(Key, TheBucket) ? 1 : 0;
  }

  value_type *find(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return TheBucket;
    return nullptr;
  }
  const value_type *find(const KeyT &Key) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return TheBucket;
    return nullptr;
  }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <class LookupKeyT>
  value_type *find_as(const LookupKeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return TheBucket;
    return nullptr;
  }
  template <class LookupKeyT>
  const value_type *find_as(const LookupKeyT &Key) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return TheBucket;
    return nullptr;
  }

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const KeyT &Key) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return TheBucket->getSecond();
    return ValueT();
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  detail::DenseMapPair<value_type *, bool> insert(const value_type &KV) {
    return try_emplace(KV.first, KV.second);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  detail::DenseMapPair<value_type *, bool> insert(value_type &&KV) {
    return try_emplace(__sanitizer::move(KV.first),
                       __sanitizer::move(KV.second));
  }
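
  // For example (illustrative): a second insert with the same key reports
  // failure and leaves the stored value untouched.
  //
  //   DenseMap<uptr, uptr> M;
  //   auto Res = M.insert({1, 10});  // Res.second == true, 10 is stored.
  //   Res = M.insert({1, 20});       // Res.second == false, value stays 10.
  //   CHECK_EQ(Res.first->second, 10);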

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  detail::DenseMapPair<value_type *, bool> try_emplace(KeyT &&Key,
                                                       Ts &&...Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return {TheBucket, false}; // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(TheBucket, __sanitizer::move(Key),
                                 __sanitizer::forward<Ts>(Args)...);
    return {TheBucket, true};
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  detail::DenseMapPair<value_type *, bool> try_emplace(const KeyT &Key,
                                                       Ts &&...Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return {TheBucket, false}; // Already in map.

    // Otherwise, insert the new element.
    TheBucket =
        InsertIntoBucket(TheBucket, Key, __sanitizer::forward<Ts>(Args)...);
    return {TheBucket, true};
  }

  /// Alternate version of insert() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <typename LookupKeyT>
  detail::DenseMapPair<value_type *, bool> insert_as(value_type &&KV,
                                                     const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return {TheBucket, false}; // Already in map.

    // Otherwise, insert the new element.
    TheBucket =
        InsertIntoBucketWithLookup(TheBucket, __sanitizer::move(KV.first),
                                   __sanitizer::move(KV.second), Val);
    return {TheBucket, true};
  }

  bool erase(const KeyT &Val) {
    BucketT *TheBucket;
    if (!LookupBucketFor(Val, TheBucket))
      return false; // not in map.

    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }

  void erase(value_type *I) {
    CHECK_NE(I, nullptr);
    BucketT *TheBucket = &*I;
    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }

  value_type &FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, Key);
  }

  ValueT &operator[](const KeyT &Key) { return FindAndConstruct(Key).second; }

  value_type &FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, __sanitizer::move(Key));
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(__sanitizer::move(Key)).second;
  }

  /// Iterate over active entries of the container.
  ///
  /// The callback can return false to stop the iteration early.
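  ///
  /// For example (illustrative):
  /// \code
  ///   unsigned NonZero = 0;
  ///   Map.forEach([&](const auto &KV) {
  ///     if (KV.second != 0)
  ///       ++NonZero;
  ///     return true;  // Keep iterating; return false to stop early.
  ///   });
  /// \endcode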
  template <class Fn>
  void forEach(Fn fn) {
    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (auto *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      const KeyT K = P->getFirst();
      if (!KeyInfoT::isEqual(K, EmptyKey) &&
          !KeyInfoT::isEqual(K, TombstoneKey)) {
        if (!fn(*P))
          return;
      }
    }
  }

  template <class Fn>
  void forEach(Fn fn) const {
    const_cast<DenseMapBase *>(this)->forEach(
        [&](const value_type &KV) { return fn(KV); });
  }

 protected:
  DenseMapBase() = default;

  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
        P->getSecond().~ValueT();
      P->getFirst().~KeyT();
    }
  }

  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    CHECK_EQ((getNumBuckets() & (getNumBuckets() - 1)), 0);
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      ::new (&B->getFirst()) KeyT(EmptyKey);
  }

  /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without need to grow().
  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
    // Ensure that "NumEntries * 4 < NumBuckets * 3"
    if (NumEntries == 0)
      return 0;
    // +1 is required because of the strict inequality.
    // For example, if NumEntries is 48, 48 * 4 / 3 = 64 buckets would make the
    // load exactly 3/4, so at least 65 are needed before rounding up.
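    // For example (illustrative): reserve(48) yields 48 * 4 / 3 + 1 = 65, the
    // extra +1 below gives 66, and RoundUpToPowerOfTwo(66) == 128; with 128
    // buckets, 48 * 4 = 192 < 128 * 3 = 384, so no grow() is triggered.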
    return RoundUpToPowerOfTwo((NumEntries * 4 / 3 + 1) + /* NextPowerOf2 */ 1);
  }

  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
        (void)FoundVal; // silence warning.
        CHECK(!FoundVal);
        DestBucket->getFirst() = __sanitizer::move(B->getFirst());
        ::new (&DestBucket->getSecond())
            ValueT(__sanitizer::move(B->getSecond()));
        incrementNumEntries();

        // Free the value.
        B->getSecond().~ValueT();
      }
      B->getFirst().~KeyT();
    }
  }

  template <typename OtherBaseT>
  void copyFrom(
      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
    CHECK_NE(&other, this);
    CHECK_EQ(getNumBuckets(), other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    if (__sanitizer::is_trivially_copyable<KeyT>::value &&
        __sanitizer::is_trivially_copyable<ValueT>::value)
      internal_memcpy(reinterpret_cast<void *>(getBuckets()),
                      other.getBuckets(), getNumBuckets() * sizeof(BucketT));
    else
      for (uptr i = 0; i < getNumBuckets(); ++i) {
        ::new (&getBuckets()[i].getFirst())
            KeyT(other.getBuckets()[i].getFirst());
        if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
            !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
          ::new (&getBuckets()[i].getSecond())
              ValueT(other.getBuckets()[i].getSecond());
      }
  }

  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  template <typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  static const KeyT getEmptyKey() { return KeyInfoT::getEmptyKey(); }

  static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); }

 private:
  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }

  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }

  void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }

  void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }

  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }

  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }

  void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }

  void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }

  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }

  BucketT *getBuckets() { return static_cast<DerivedT *>(this)->getBuckets(); }

  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }

  BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }

  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) { static_cast<DerivedT *>(this)->grow(AtLeast); }

  template <typename KeyArg, typename... ValueArgs>
  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
                            ValueArgs &&...Values) {
    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);

    TheBucket->getFirst() = __sanitizer::forward<KeyArg>(Key);
    ::new (&TheBucket->getSecond())
        ValueT(__sanitizer::forward<ValueArgs>(Values)...);
    return TheBucket;
  }

  template <typename LookupKeyT>
  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
                                      ValueT &&Value, LookupKeyT &Lookup) {
    TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);

    TheBucket->getFirst() = __sanitizer::move(Key);
    ::new (&TheBucket->getSecond()) ValueT(__sanitizer::move(Value));
    return TheBucket;
  }

  template <typename LookupKeyT>
  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
                                BucketT *TheBucket) {
    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The latter case is tricky. For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket. If the
    // table were completely filled with tombstones, no lookup would ever
    // succeed, causing infinite loops in lookup.
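    //
    // For example (illustrative): with NumBuckets == 64, inserting the 48th
    // entry triggers the first branch (48 * 4 = 192 >= 64 * 3 = 192) and the
    // table grows to 128 buckets; if instead tombstones pile up until fewer
    // than 64 / 8 = 8 buckets are empty, grow(NumBuckets) rebuilds the table
    // at the same size, which drops the tombstones.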
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Lookup, TheBucket);
      NumBuckets = getNumBuckets();
    } else if (UNLIKELY(NumBuckets - (NewNumEntries + getNumTombstones()) <=
                        NumBuckets / 8)) {
      this->grow(NumBuckets);
      LookupBucketFor(Lookup, TheBucket);
    }
    CHECK(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    const KeyT EmptyKey = getEmptyKey();
    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
      decrementNumTombstones();

    return TheBucket;
  }

  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket. If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
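  ///
  /// Probing is quadratic: the i-th probe after the initial hash position H
  /// inspects bucket (H + i * (i + 1) / 2) mod NumBuckets. Because NumBuckets
  /// is a power of two, this sequence visits every bucket, so probing cannot
  /// cycle without finding an empty bucket.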
  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    const BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    CHECK(!KeyInfoT::isEqual(Val, EmptyKey));
    CHECK(!KeyInfoT::isEqual(Val, TombstoneKey));

    unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
    unsigned ProbeAmt = 1;
    while (true) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket? If so, return it.
      if (LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the set.
      // Insert it and return the default value.
      if (LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it. If Val ends up not in the map, we
      // prefer to return it rather than something that would require more
      // probing.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket; // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets - 1);
    }
  }

  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)->LookupBucketFor(
        Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }

 public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the sizes of the referenced objects
  /// are not included.
  uptr getMemorySize() const {
    return RoundUpTo(getNumBuckets() * sizeof(BucketT), GetPageSizeCached());
  }
};

/// Equality comparison for DenseMap.
///
/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
/// is also in RHS, and that no additional pairs are in RHS.
/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
/// complexity is linear, worst case is O(N^2) (if every hash collides).
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator==(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  if (LHS.size() != RHS.size())
    return false;

  bool R = true;
  LHS.forEach(
      [&](const typename DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT,
                                      BucketT>::value_type &KV) -> bool {
        const auto *I = RHS.find(KV.first);
        if (!I || I->second != KV.second) {
          R = false;
          return false;
        }
        return true;
      });

  return R;
}

/// Inequality comparison for DenseMap.
///
/// Equivalent to !(LHS == RHS). See operator== for performance notes.
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator!=(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  return !(LHS == RHS);
}

template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
                                     KeyT, ValueT, KeyInfoT, BucketT> {
  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  BucketT *Buckets = nullptr;
  unsigned NumEntries = 0;
  unsigned NumTombstones = 0;
  unsigned NumBuckets = 0;

 public:
  /// Create a DenseMap with an optional \p InitialReserve that guarantees that
  /// this number of elements can be inserted in the map without grow().
  explicit DenseMap(unsigned InitialReserve) { init(InitialReserve); }
  constexpr DenseMap() = default;

  DenseMap(const DenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  DenseMap(DenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  ~DenseMap() {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
  }

  void swap(DenseMap &RHS) {
    Swap(Buckets, RHS.Buckets);
    Swap(NumEntries, RHS.NumEntries);
    Swap(NumTombstones, RHS.NumTombstones);
    Swap(NumBuckets, RHS.NumBuckets);
  }

  DenseMap &operator=(const DenseMap &other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  DenseMap &operator=(DenseMap &&other) {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const DenseMap &other) {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void init(unsigned InitNumEntries) {
    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    allocateBuckets(RoundUpToPowerOfTwo(Max<unsigned>(64, AtLeast)));
    CHECK(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets + OldNumBuckets);

    // Free the old table.
    deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets);
  }

 private:
  unsigned getNumEntries() const { return NumEntries; }

  void setNumEntries(unsigned Num) { NumEntries = Num; }

  unsigned getNumTombstones() const { return NumTombstones; }

  void setNumTombstones(unsigned Num) { NumTombstones = Num; }

  BucketT *getBuckets() const { return Buckets; }

  unsigned getNumBuckets() const { return NumBuckets; }

  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = nullptr;
      return false;
    }

    uptr Size = sizeof(BucketT) * NumBuckets;
    if (Size * 2 <= GetPageSizeCached()) {
      // We always allocate at least a page, so use entire space.
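      // For example (illustrative): with a 4096-byte page and 16-byte buckets,
      // a request for 64 buckets (1024 bytes) is scaled up by 4x to 256
      // buckets so that the whole page backing the allocation is used.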
      unsigned Log2 = MostSignificantSetBitIndex(GetPageSizeCached() / Size);
      Size <<= Log2;
      NumBuckets <<= Log2;
      CHECK_EQ(Size, sizeof(BucketT) * NumBuckets);
      CHECK_GT(Size * 2, GetPageSizeCached());
    }
    Buckets = static_cast<BucketT *>(allocate_buffer(Size));
    return true;
  }

  static void *allocate_buffer(uptr Size) {
    return MmapOrDie(RoundUpTo(Size, GetPageSizeCached()), "DenseMap");
  }

  static void deallocate_buffer(void *Ptr, uptr Size) {
    UnmapOrDie(Ptr, RoundUpTo(Size, GetPageSizeCached()));
  }
};

} // namespace __sanitizer

#endif // SANITIZER_DENSE_MAP_H
