1// hashtable.h header -*- C++ -*-
2
3// Copyright (C) 2007-2021 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/hashtable.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{unordered_map, unordered_set}
28 */
29
30#ifndef _HASHTABLE_H
31#define _HASHTABLE_H 1
32
33#pragma GCC system_header
34
35#include <bits/hashtable_policy.h>
36#include <bits/enable_special_members.h>
37#if __cplusplus > 201402L
38# include <bits/node_handle.h>
39#endif
40
41namespace std _GLIBCXX_VISIBILITY(default)
42{
43_GLIBCXX_BEGIN_NAMESPACE_VERSION
44/// @cond undocumented
45
46 template<typename _Tp, typename _Hash>
47 using __cache_default
48 = __not_<__and_<// Do not cache for fast hasher.
49 __is_fast_hash<_Hash>,
50 // Mandatory to have erase not throwing.
51 __is_nothrow_invocable<const _Hash&, const _Tp&>>>;
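  // Illustrative note (not used by the implementation): with the default
  // policies a cheap, noexcept hasher such as std::hash<int> gives
  // __cache_default = false_type (no hash code stored in the nodes), while
  // an expensive hasher such as std::hash<std::string> gives true_type,
  // so each node caches its hash code.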
52
53 // Helper to conditionally delete the default constructor.
54 // The _Hash_node_base type is used to distinguish this specialization
55 // from any other potentially-overlapping subobjects of the hashtable.
56 template<typename _Equal, typename _Hash, typename _Allocator>
57 using _Hashtable_enable_default_ctor
58 = _Enable_default_constructor<__and_<is_default_constructible<_Equal>,
59 is_default_constructible<_Hash>,
60 is_default_constructible<_Allocator>>{},
61 __detail::_Hash_node_base>;
62
63 /**
64 * Primary class template _Hashtable.
65 *
66 * @ingroup hashtable-detail
67 *
68 * @tparam _Value CopyConstructible type.
69 *
70 * @tparam _Key CopyConstructible type.
71 *
72 * @tparam _Alloc An allocator type
73 * ([lib.allocator.requirements]) whose _Alloc::value_type is
74 * _Value. As a conforming extension, we allow for
75 * _Alloc::value_type != _Value.
76 *
77 * @tparam _ExtractKey Function object that takes an object of type
78 * _Value and returns a value of type _Key.
79 *
   *  @tparam _Equal  Function object that takes two objects of type _Key
81 * and returns a bool-like value that is true if the two objects
82 * are considered equal.
83 *
84 * @tparam _Hash The hash function. A unary function object with
85 * argument type _Key and result type size_t. Return values should
   *  be distributed over the entire range [0, numeric_limits<size_t>::max()].
87 *
88 * @tparam _RangeHash The range-hashing function (in the terminology of
89 * Tavori and Dreizin). A binary function object whose argument
90 * types and result type are all size_t. Given arguments r and N,
91 * the return value is in the range [0, N).
92 *
93 * @tparam _Unused Not used.
94 *
95 * @tparam _RehashPolicy Policy class with three members, all of
96 * which govern the bucket count. _M_next_bkt(n) returns a bucket
97 * count no smaller than n. _M_bkt_for_elements(n) returns a
98 * bucket count appropriate for an element count of n.
99 * _M_need_rehash(n_bkt, n_elt, n_ins) determines whether, if the
100 * current bucket count is n_bkt and the current element count is
101 * n_elt, we need to increase the bucket count for n_ins insertions.
   *  If so, returns make_pair(true, n), where n is the new bucket count.
   *  If not, returns make_pair(false, <anything>).
104 *
105 * @tparam _Traits Compile-time class with three boolean
106 * std::integral_constant members: __cache_hash_code, __constant_iterators,
107 * __unique_keys.
108 *
109 * Each _Hashtable data structure has:
110 *
111 * - _Bucket[] _M_buckets
112 * - _Hash_node_base _M_before_begin
113 * - size_type _M_bucket_count
114 * - size_type _M_element_count
115 *
116 * with _Bucket being _Hash_node_base* and _Hash_node containing:
117 *
118 * - _Hash_node* _M_next
119 * - Tp _M_value
120 * - size_t _M_hash_code if cache_hash_code is true
121 *
122 * In terms of Standard containers the hashtable is like the aggregation of:
123 *
124 * - std::forward_list<_Node> containing the elements
125 * - std::vector<std::forward_list<_Node>::iterator> representing the buckets
126 *
127 * The non-empty buckets contain the node before the first node in the
128 * bucket. This design makes it possible to implement something like a
129 * std::forward_list::insert_after on container insertion and
130 * std::forward_list::erase_after on container erase
131 * calls. _M_before_begin is equivalent to
132 * std::forward_list::before_begin. Empty buckets contain
   *  nullptr.  Note that one of the non-empty buckets contains
   *  &_M_before_begin, which is not a dereferenceable node, so the node
   *  pointer stored in a bucket must never be dereferenced; only its next
   *  node can be.
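   *
   *  As an illustration only, consider a table whose forward list holds
   *  n1 and n2 in bucket 1 and n3 in bucket 3:
   *
   *    _M_before_begin -> n1 -> n2 -> n3 -> null
   *    _M_buckets[1] == &_M_before_begin  (the node before bucket 1's first)
   *    _M_buckets[2] == nullptr           (empty bucket)
   *    _M_buckets[3] == &n2               (n2 precedes n3, bucket 3's first)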
137 *
138 * Walking through a bucket's nodes requires a check on the hash code to
139 * see if each node is still in the bucket. Such a design assumes a
140 * quite efficient hash functor and is one of the reasons it is
141 * highly advisable to set __cache_hash_code to true.
142 *
143 * The container iterators are simply built from nodes. This way
144 * incrementing the iterator is perfectly efficient independent of
145 * how many empty buckets there are in the container.
146 *
147 * On insert we compute the element's hash code and use it to find the
148 * bucket index. If the element must be inserted in an empty bucket
149 * we add it at the beginning of the singly linked list and make the
150 * bucket point to _M_before_begin. The bucket that used to point to
151 * _M_before_begin, if any, is updated to point to its new before
152 * begin node.
153 *
154 * On erase, the simple iterator design requires using the hash
155 * functor to get the index of the bucket to update. For this
156 * reason, when __cache_hash_code is set to false the hash functor must
157 * not throw and this is enforced by a static assertion.
158 *
159 * Functionality is implemented by decomposition into base classes,
160 * where the derived _Hashtable class is used in _Map_base,
161 * _Insert, _Rehash_base, and _Equality base classes to access the
162 * "this" pointer. _Hashtable_base is used in the base classes as a
163 * non-recursive, fully-completed-type so that detailed nested type
164 * information, such as iterator type and node type, can be
165 * used. This is similar to the "Curiously Recurring Template
166 * Pattern" (CRTP) technique, but uses a reconstructed, not
167 * explicitly passed, template pattern.
168 *
169 * Base class templates are:
170 * - __detail::_Hashtable_base
171 * - __detail::_Map_base
172 * - __detail::_Insert
173 * - __detail::_Rehash_base
174 * - __detail::_Equality
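   *
   *  As an illustration only (a sketch mirroring the aliases in
   *  <bits/unordered_map.h>), std::unordered_map<_Key, _Tp, _Hash, _Pred,
   *  _Alloc> instantiates something equivalent to:
   *
   *    _Hashtable<_Key, std::pair<const _Key, _Tp>, _Alloc,
   *               __detail::_Select1st, _Pred, _Hash,
   *               __detail::_Mod_range_hashing,
   *               __detail::_Default_ranged_hash,
   *               __detail::_Prime_rehash_policy,
   *               __umap_traits<__cache_default<_Key, _Hash>::value>>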
175 */
176 template<typename _Key, typename _Value, typename _Alloc,
177 typename _ExtractKey, typename _Equal,
178 typename _Hash, typename _RangeHash, typename _Unused,
179 typename _RehashPolicy, typename _Traits>
180 class _Hashtable
181 : public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
182 _Hash, _RangeHash, _Unused, _Traits>,
183 public __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
184 _Hash, _RangeHash, _Unused,
185 _RehashPolicy, _Traits>,
186 public __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey, _Equal,
187 _Hash, _RangeHash, _Unused,
188 _RehashPolicy, _Traits>,
189 public __detail::_Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
190 _Hash, _RangeHash, _Unused,
191 _RehashPolicy, _Traits>,
192 public __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey, _Equal,
193 _Hash, _RangeHash, _Unused,
194 _RehashPolicy, _Traits>,
195 private __detail::_Hashtable_alloc<
196 __alloc_rebind<_Alloc,
197 __detail::_Hash_node<_Value,
198 _Traits::__hash_cached::value>>>,
199 private _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>
200 {
201 static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
202 "unordered container must have a non-const, non-volatile value_type");
203#if __cplusplus > 201703L || defined __STRICT_ANSI__
204 static_assert(is_same<typename _Alloc::value_type, _Value>{},
205 "unordered container must have the same value_type as its allocator");
206#endif
207
208 using __traits_type = _Traits;
209 using __hash_cached = typename __traits_type::__hash_cached;
210 using __constant_iterators = typename __traits_type::__constant_iterators;
211 using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
212 using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
213
214 using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
215
216 using __node_value_type =
217 __detail::_Hash_node_value<_Value, __hash_cached::value>;
218 using __node_ptr = typename __hashtable_alloc::__node_ptr;
219 using __value_alloc_traits =
220 typename __hashtable_alloc::__value_alloc_traits;
221 using __node_alloc_traits =
222 typename __hashtable_alloc::__node_alloc_traits;
223 using __node_base = typename __hashtable_alloc::__node_base;
224 using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
225 using __buckets_ptr = typename __hashtable_alloc::__buckets_ptr;
226
227 using __insert_base = __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey,
228 _Equal, _Hash,
229 _RangeHash, _Unused,
230 _RehashPolicy, _Traits>;
231 using __enable_default_ctor
232 = _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>;
233
234 public:
235 typedef _Key key_type;
236 typedef _Value value_type;
237 typedef _Alloc allocator_type;
238 typedef _Equal key_equal;
239
240 // mapped_type, if present, comes from _Map_base.
241 // hasher, if present, comes from _Hash_code_base/_Hashtable_base.
242 typedef typename __value_alloc_traits::pointer pointer;
243 typedef typename __value_alloc_traits::const_pointer const_pointer;
244 typedef value_type& reference;
245 typedef const value_type& const_reference;
246
247 using iterator = typename __insert_base::iterator;
248
249 using const_iterator = typename __insert_base::const_iterator;
250
251 using local_iterator = __detail::_Local_iterator<key_type, _Value,
252 _ExtractKey, _Hash, _RangeHash, _Unused,
253 __constant_iterators::value,
254 __hash_cached::value>;
255
256 using const_local_iterator = __detail::_Local_const_iterator<
257 key_type, _Value,
258 _ExtractKey, _Hash, _RangeHash, _Unused,
259 __constant_iterators::value, __hash_cached::value>;
260
261 private:
262 using __rehash_type = _RehashPolicy;
263 using __rehash_state = typename __rehash_type::_State;
264
265 using __unique_keys = typename __traits_type::__unique_keys;
266
267 using __hashtable_base = __detail::
268 _Hashtable_base<_Key, _Value, _ExtractKey,
269 _Equal, _Hash, _RangeHash, _Unused, _Traits>;
270
271 using __hash_code_base = typename __hashtable_base::__hash_code_base;
272 using __hash_code = typename __hashtable_base::__hash_code;
273 using __ireturn_type = typename __insert_base::__ireturn_type;
274
275 using __map_base = __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey,
276 _Equal, _Hash, _RangeHash, _Unused,
277 _RehashPolicy, _Traits>;
278
279 using __rehash_base = __detail::_Rehash_base<_Key, _Value, _Alloc,
280 _ExtractKey, _Equal,
281 _Hash, _RangeHash, _Unused,
282 _RehashPolicy, _Traits>;
283
284 using __eq_base = __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey,
285 _Equal, _Hash, _RangeHash, _Unused,
286 _RehashPolicy, _Traits>;
287
288 using __reuse_or_alloc_node_gen_t =
289 __detail::_ReuseOrAllocNode<__node_alloc_type>;
290 using __alloc_node_gen_t =
291 __detail::_AllocNode<__node_alloc_type>;
292
293 // Simple RAII type for managing a node containing an element
294 struct _Scoped_node
295 {
296 // Take ownership of a node with a constructed element.
297 _Scoped_node(__node_ptr __n, __hashtable_alloc* __h)
298 : _M_h(__h), _M_node(__n) { }
299
300 // Allocate a node and construct an element within it.
301 template<typename... _Args>
302 _Scoped_node(__hashtable_alloc* __h, _Args&&... __args)
303 : _M_h(__h),
304 _M_node(__h->_M_allocate_node(std::forward<_Args>(__args)...))
305 { }
306
307 // Destroy element and deallocate node.
308 ~_Scoped_node() { if (_M_node) _M_h->_M_deallocate_node(_M_node); };
309
310 _Scoped_node(const _Scoped_node&) = delete;
311 _Scoped_node& operator=(const _Scoped_node&) = delete;
312
313 __hashtable_alloc* _M_h;
314 __node_ptr _M_node;
315 };
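      // Typical usage elsewhere in this file (sketch only): construct the
      // node up front, link it into the table, then release ownership so
      // the destructor does not deallocate it.
      //
      //   _Scoped_node __node { this, std::forward<_Args>(__args)... };
      //   /* ... link __node._M_node into its bucket ... */
      //   __node._M_node = nullptr;  // ownership transferred to the table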
316
317 template<typename _Ht>
318 static constexpr
319 typename conditional<std::is_lvalue_reference<_Ht>::value,
320 const value_type&, value_type&&>::type
321 __fwd_value_for(value_type& __val) noexcept
322 { return std::move(__val); }
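      // _M_assign below instantiates _Ht as either const _Hashtable& (copy)
      // or _Hashtable&& (move), so __fwd_value_for forwards each stored
      // value as const value_type& or value_type&& respectively, e.g.
      //   __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()))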
323
324 // Compile-time diagnostics.
325
326 // _Hash_code_base has everything protected, so use this derived type to
327 // access it.
328 struct __hash_code_base_access : __hash_code_base
329 { using __hash_code_base::_M_bucket_index; };
330
331 // Getting a bucket index from a node shall not throw because it is used
332 // in methods (erase, swap...) that shall not throw.
333 static_assert(noexcept(declval<const __hash_code_base_access&>()
334 ._M_bucket_index(declval<const __node_value_type&>(),
335 (std::size_t)0)),
336 "Cache the hash code or qualify your functors involved"
337 " in hash code and bucket index computation with noexcept");
338
339 // To get bucket index we need _RangeHash not to throw.
340 static_assert(is_nothrow_default_constructible<_RangeHash>::value,
341 "Functor used to map hash code to bucket index"
342 " must be nothrow default constructible");
343 static_assert(noexcept(
344 std::declval<const _RangeHash&>()((std::size_t)0, (std::size_t)0)),
345 "Functor used to map hash code to bucket index must be"
346 " noexcept");
347
      // To compute bucket index we also need _ExtractKey not to throw.
349 static_assert(is_nothrow_default_constructible<_ExtractKey>::value,
350 "_ExtractKey must be nothrow default constructible");
351 static_assert(noexcept(
352 std::declval<const _ExtractKey&>()(std::declval<_Value>())),
353 "_ExtractKey functor must be noexcept invocable");
354
355 template<typename _Keya, typename _Valuea, typename _Alloca,
356 typename _ExtractKeya, typename _Equala,
357 typename _Hasha, typename _RangeHasha, typename _Unuseda,
358 typename _RehashPolicya, typename _Traitsa,
359 bool _Unique_keysa>
360 friend struct __detail::_Map_base;
361
362 template<typename _Keya, typename _Valuea, typename _Alloca,
363 typename _ExtractKeya, typename _Equala,
364 typename _Hasha, typename _RangeHasha, typename _Unuseda,
365 typename _RehashPolicya, typename _Traitsa>
366 friend struct __detail::_Insert_base;
367
368 template<typename _Keya, typename _Valuea, typename _Alloca,
369 typename _ExtractKeya, typename _Equala,
370 typename _Hasha, typename _RangeHasha, typename _Unuseda,
371 typename _RehashPolicya, typename _Traitsa,
372 bool _Constant_iteratorsa>
373 friend struct __detail::_Insert;
374
375 template<typename _Keya, typename _Valuea, typename _Alloca,
376 typename _ExtractKeya, typename _Equala,
377 typename _Hasha, typename _RangeHasha, typename _Unuseda,
378 typename _RehashPolicya, typename _Traitsa,
379 bool _Unique_keysa>
380 friend struct __detail::_Equality;
381
382 public:
383 using size_type = typename __hashtable_base::size_type;
384 using difference_type = typename __hashtable_base::difference_type;
385
386#if __cplusplus > 201402L
387 using node_type = _Node_handle<_Key, _Value, __node_alloc_type>;
388 using insert_return_type = _Node_insert_return<iterator, node_type>;
389#endif
390
391 private:
392 __buckets_ptr _M_buckets = &_M_single_bucket;
393 size_type _M_bucket_count = 1;
394 __node_base _M_before_begin;
395 size_type _M_element_count = 0;
396 _RehashPolicy _M_rehash_policy;
397
      // A single bucket used when only one bucket is needed.  Especially
      // useful in move semantics, to leave the moved-from hashtable with
      // only one bucket, which is not allocated, so that those operations
      // can be noexcept qualified.
      // Note that we can't leave the hashtable with 0 buckets without adding
      // numerous checks in the code to avoid a modulus by 0.
404 __node_base_ptr _M_single_bucket = nullptr;
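      // For example, a default-constructed or moved-from _Hashtable has
      // _M_buckets == &_M_single_bucket and _M_bucket_count == 1, so no
      // bucket array has been allocated.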
405
406 void
407 _M_update_bbegin()
408 {
409 if (_M_begin())
410 _M_buckets[_M_bucket_index(*_M_begin())] = &_M_before_begin;
411 }
412
413 void
414 _M_update_bbegin(__node_ptr __n)
415 {
416 _M_before_begin._M_nxt = __n;
417 _M_update_bbegin();
418 }
419
420 bool
421 _M_uses_single_bucket(__buckets_ptr __bkts) const
422 { return __builtin_expect(__bkts == &_M_single_bucket, false); }
423
424 bool
425 _M_uses_single_bucket() const
426 { return _M_uses_single_bucket(_M_buckets); }
427
428 __hashtable_alloc&
429 _M_base_alloc() { return *this; }
430
431 __buckets_ptr
432 _M_allocate_buckets(size_type __bkt_count)
433 {
434 if (__builtin_expect(__bkt_count == 1, false))
435 {
436 _M_single_bucket = nullptr;
437 return &_M_single_bucket;
438 }
439
440 return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
441 }
442
443 void
444 _M_deallocate_buckets(__buckets_ptr __bkts, size_type __bkt_count)
445 {
446 if (_M_uses_single_bucket(__bkts))
447 return;
448
449 __hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
450 }
451
452 void
453 _M_deallocate_buckets()
454 { _M_deallocate_buckets(_M_buckets, _M_bucket_count); }
455
456 // Gets bucket begin, deals with the fact that non-empty buckets contain
457 // their before begin node.
458 __node_ptr
459 _M_bucket_begin(size_type __bkt) const;
460
461 __node_ptr
462 _M_begin() const
463 { return static_cast<__node_ptr>(_M_before_begin._M_nxt); }
464
465 // Assign *this using another _Hashtable instance. Whether elements
466 // are copied or moved depends on the _Ht reference.
467 template<typename _Ht>
468 void
469 _M_assign_elements(_Ht&&);
470
471 template<typename _Ht, typename _NodeGenerator>
472 void
473 _M_assign(_Ht&&, const _NodeGenerator&);
474
475 void
476 _M_move_assign(_Hashtable&&, true_type);
477
478 void
479 _M_move_assign(_Hashtable&&, false_type);
480
481 void
482 _M_reset() noexcept;
483
484 _Hashtable(const _Hash& __h, const _Equal& __eq,
485 const allocator_type& __a)
486 : __hashtable_base(__h, __eq),
487 __hashtable_alloc(__node_alloc_type(__a)),
488 __enable_default_ctor(_Enable_default_constructor_tag{})
489 { }
490
491 template<bool _No_realloc = true>
492 static constexpr bool
493 _S_nothrow_move()
494 {
495#if __cplusplus <= 201402L
496 return __and_<__bool_constant<_No_realloc>,
497 is_nothrow_copy_constructible<_Hash>,
498 is_nothrow_copy_constructible<_Equal>>::value;
499#else
500 if constexpr (_No_realloc)
501 if constexpr (is_nothrow_copy_constructible<_Hash>())
502 return is_nothrow_copy_constructible<_Equal>();
503 return false;
504#endif
505 }
506
507 _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
508 true_type /* alloc always equal */)
509 noexcept(_S_nothrow_move());
510
511 _Hashtable(_Hashtable&&, __node_alloc_type&&,
512 false_type /* alloc always equal */);
513
514 template<typename _InputIterator>
515 _Hashtable(_InputIterator __first, _InputIterator __last,
516 size_type __bkt_count_hint,
517 const _Hash&, const _Equal&, const allocator_type&,
518 true_type __uks);
519
520 template<typename _InputIterator>
521 _Hashtable(_InputIterator __first, _InputIterator __last,
522 size_type __bkt_count_hint,
523 const _Hash&, const _Equal&, const allocator_type&,
524 false_type __uks);
525
526 public:
527 // Constructor, destructor, assignment, swap
528 _Hashtable() = default;
529
530 _Hashtable(const _Hashtable&);
531
532 _Hashtable(const _Hashtable&, const allocator_type&);
533
534 explicit
535 _Hashtable(size_type __bkt_count_hint,
536 const _Hash& __hf = _Hash(),
537 const key_equal& __eql = key_equal(),
538 const allocator_type& __a = allocator_type());
539
540 // Use delegating constructors.
541 _Hashtable(_Hashtable&& __ht)
542 noexcept(_S_nothrow_move())
543 : _Hashtable(std::move(__ht), std::move(__ht._M_node_allocator()),
544 true_type{})
545 { }
546
547 _Hashtable(_Hashtable&& __ht, const allocator_type& __a)
548 noexcept(_S_nothrow_move<__node_alloc_traits::_S_always_equal()>())
549 : _Hashtable(std::move(__ht), __node_alloc_type(__a),
550 typename __node_alloc_traits::is_always_equal{})
551 { }
552
553 explicit
554 _Hashtable(const allocator_type& __a)
555 : __hashtable_alloc(__node_alloc_type(__a)),
556 __enable_default_ctor(_Enable_default_constructor_tag{})
557 { }
558
559 template<typename _InputIterator>
560 _Hashtable(_InputIterator __f, _InputIterator __l,
561 size_type __bkt_count_hint = 0,
562 const _Hash& __hf = _Hash(),
563 const key_equal& __eql = key_equal(),
564 const allocator_type& __a = allocator_type())
565 : _Hashtable(__f, __l, __bkt_count_hint, __hf, __eql, __a,
566 __unique_keys{})
567 { }
568
569 _Hashtable(initializer_list<value_type> __l,
570 size_type __bkt_count_hint = 0,
571 const _Hash& __hf = _Hash(),
572 const key_equal& __eql = key_equal(),
573 const allocator_type& __a = allocator_type())
574 : _Hashtable(__l.begin(), __l.end(), __bkt_count_hint,
575 __hf, __eql, __a, __unique_keys{})
576 { }
577
578 _Hashtable&
579 operator=(const _Hashtable& __ht);
580
581 _Hashtable&
582 operator=(_Hashtable&& __ht)
583 noexcept(__node_alloc_traits::_S_nothrow_move()
584 && is_nothrow_move_assignable<_Hash>::value
585 && is_nothrow_move_assignable<_Equal>::value)
586 {
587 constexpr bool __move_storage =
588 __node_alloc_traits::_S_propagate_on_move_assign()
589 || __node_alloc_traits::_S_always_equal();
590 _M_move_assign(std::move(__ht), __bool_constant<__move_storage>());
591 return *this;
592 }
593
594 _Hashtable&
595 operator=(initializer_list<value_type> __l)
596 {
597 __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
598 _M_before_begin._M_nxt = nullptr;
599 clear();
600
601 // We consider that all elements of __l are going to be inserted.
602 auto __l_bkt_count = _M_rehash_policy._M_bkt_for_elements(__l.size());
603
604 // Do not shrink to keep potential user reservation.
605 if (_M_bucket_count < __l_bkt_count)
	  rehash(__l_bkt_count);
607
608 this->_M_insert_range(__l.begin(), __l.end(), __roan, __unique_keys{});
609 return *this;
610 }
611
612 ~_Hashtable() noexcept;
613
614 void
615 swap(_Hashtable&)
616 noexcept(__and_<__is_nothrow_swappable<_Hash>,
617 __is_nothrow_swappable<_Equal>>::value);
618
619 // Basic container operations
620 iterator
621 begin() noexcept
622 { return iterator(_M_begin()); }
623
624 const_iterator
625 begin() const noexcept
626 { return const_iterator(_M_begin()); }
627
628 iterator
629 end() noexcept
630 { return iterator(nullptr); }
631
632 const_iterator
633 end() const noexcept
634 { return const_iterator(nullptr); }
635
636 const_iterator
637 cbegin() const noexcept
638 { return const_iterator(_M_begin()); }
639
640 const_iterator
641 cend() const noexcept
642 { return const_iterator(nullptr); }
643
644 size_type
645 size() const noexcept
646 { return _M_element_count; }
647
648 _GLIBCXX_NODISCARD bool
649 empty() const noexcept
650 { return size() == 0; }
651
652 allocator_type
653 get_allocator() const noexcept
654 { return allocator_type(this->_M_node_allocator()); }
655
656 size_type
657 max_size() const noexcept
658 { return __node_alloc_traits::max_size(this->_M_node_allocator()); }
659
660 // Observers
661 key_equal
662 key_eq() const
663 { return this->_M_eq(); }
664
665 // hash_function, if present, comes from _Hash_code_base.
666
667 // Bucket operations
668 size_type
669 bucket_count() const noexcept
670 { return _M_bucket_count; }
671
672 size_type
673 max_bucket_count() const noexcept
674 { return max_size(); }
675
676 size_type
677 bucket_size(size_type __bkt) const
678 { return std::distance(begin(__bkt), end(__bkt)); }
679
680 size_type
681 bucket(const key_type& __k) const
682 { return _M_bucket_index(this->_M_hash_code(__k)); }
683
684 local_iterator
685 begin(size_type __bkt)
686 {
687 return local_iterator(*this, _M_bucket_begin(__bkt),
688 __bkt, _M_bucket_count);
689 }
690
691 local_iterator
692 end(size_type __bkt)
693 { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
694
695 const_local_iterator
696 begin(size_type __bkt) const
697 {
698 return const_local_iterator(*this, _M_bucket_begin(__bkt),
699 __bkt, _M_bucket_count);
700 }
701
702 const_local_iterator
703 end(size_type __bkt) const
704 { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
705
706 // DR 691.
707 const_local_iterator
708 cbegin(size_type __bkt) const
709 {
710 return const_local_iterator(*this, _M_bucket_begin(__bkt),
711 __bkt, _M_bucket_count);
712 }
713
714 const_local_iterator
715 cend(size_type __bkt) const
716 { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
717
718 float
719 load_factor() const noexcept
720 {
721 return static_cast<float>(size()) / static_cast<float>(bucket_count());
722 }
723
724 // max_load_factor, if present, comes from _Rehash_base.
725
726 // Generalization of max_load_factor. Extension, not found in
727 // TR1. Only useful if _RehashPolicy is something other than
728 // the default.
729 const _RehashPolicy&
730 __rehash_policy() const
731 { return _M_rehash_policy; }
732
733 void
734 __rehash_policy(const _RehashPolicy& __pol)
735 { _M_rehash_policy = __pol; }
736
737 // Lookup.
738 iterator
739 find(const key_type& __k);
740
741 const_iterator
742 find(const key_type& __k) const;
743
744 size_type
745 count(const key_type& __k) const;
746
747 std::pair<iterator, iterator>
748 equal_range(const key_type& __k);
749
750 std::pair<const_iterator, const_iterator>
751 equal_range(const key_type& __k) const;
752
753#if __cplusplus >= 202002L
754#define __cpp_lib_generic_unordered_lookup 201811L
755
756 template<typename _Kt,
757 typename = __has_is_transparent_t<_Hash, _Kt>,
758 typename = __has_is_transparent_t<_Equal, _Kt>>
759 iterator
760 _M_find_tr(const _Kt& __k);
761
762 template<typename _Kt,
763 typename = __has_is_transparent_t<_Hash, _Kt>,
764 typename = __has_is_transparent_t<_Equal, _Kt>>
765 const_iterator
766 _M_find_tr(const _Kt& __k) const;
767
768 template<typename _Kt,
769 typename = __has_is_transparent_t<_Hash, _Kt>,
770 typename = __has_is_transparent_t<_Equal, _Kt>>
771 size_type
772 _M_count_tr(const _Kt& __k) const;
773
774 template<typename _Kt,
775 typename = __has_is_transparent_t<_Hash, _Kt>,
776 typename = __has_is_transparent_t<_Equal, _Kt>>
777 pair<iterator, iterator>
778 _M_equal_range_tr(const _Kt& __k);
779
780 template<typename _Kt,
781 typename = __has_is_transparent_t<_Hash, _Kt>,
782 typename = __has_is_transparent_t<_Equal, _Kt>>
783 pair<const_iterator, const_iterator>
784 _M_equal_range_tr(const _Kt& __k) const;
785#endif // C++20
786
787 private:
788 // Bucket index computation helpers.
789 size_type
790 _M_bucket_index(const __node_value_type& __n) const noexcept
791 { return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }
792
793 size_type
794 _M_bucket_index(__hash_code __c) const
795 { return __hash_code_base::_M_bucket_index(__c, _M_bucket_count); }
796
797 // Find and insert helper functions and types
798 // Find the node before the one matching the criteria.
799 __node_base_ptr
800 _M_find_before_node(size_type, const key_type&, __hash_code) const;
801
802 template<typename _Kt>
803 __node_base_ptr
804 _M_find_before_node_tr(size_type, const _Kt&, __hash_code) const;
805
806 __node_ptr
807 _M_find_node(size_type __bkt, const key_type& __key,
808 __hash_code __c) const
809 {
810 __node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c);
811 if (__before_n)
812 return static_cast<__node_ptr>(__before_n->_M_nxt);
813 return nullptr;
814 }
815
816 template<typename _Kt>
817 __node_ptr
818 _M_find_node_tr(size_type __bkt, const _Kt& __key,
819 __hash_code __c) const
820 {
821 auto __before_n = _M_find_before_node_tr(__bkt, __key, __c);
822 if (__before_n)
823 return static_cast<__node_ptr>(__before_n->_M_nxt);
824 return nullptr;
825 }
826
827 // Insert a node at the beginning of a bucket.
828 void
829 _M_insert_bucket_begin(size_type, __node_ptr);
830
      // Remove the bucket's first node.
832 void
833 _M_remove_bucket_begin(size_type __bkt, __node_ptr __next_n,
834 size_type __next_bkt);
835
836 // Get the node before __n in the bucket __bkt
837 __node_base_ptr
838 _M_get_previous_node(size_type __bkt, __node_ptr __n);
839
840 // Insert node __n with hash code __code, in bucket __bkt if no
841 // rehash (assumes no element with same key already present).
842 // Takes ownership of __n if insertion succeeds, throws otherwise.
843 iterator
844 _M_insert_unique_node(size_type __bkt, __hash_code,
845 __node_ptr __n, size_type __n_elt = 1);
846
847 // Insert node __n with key __k and hash code __code.
848 // Takes ownership of __n if insertion succeeds, throws otherwise.
849 iterator
850 _M_insert_multi_node(__node_ptr __hint,
851 __hash_code __code, __node_ptr __n);
852
853 template<typename... _Args>
854 std::pair<iterator, bool>
855 _M_emplace(true_type __uks, _Args&&... __args);
856
857 template<typename... _Args>
858 iterator
859 _M_emplace(false_type __uks, _Args&&... __args)
860 { return _M_emplace(cend(), __uks, std::forward<_Args>(__args)...); }
861
862 // Emplace with hint, useless when keys are unique.
863 template<typename... _Args>
864 iterator
865 _M_emplace(const_iterator, true_type __uks, _Args&&... __args)
866 { return _M_emplace(__uks, std::forward<_Args>(__args)...).first; }
867
868 template<typename... _Args>
869 iterator
870 _M_emplace(const_iterator, false_type __uks, _Args&&... __args);
871
872 template<typename _Arg, typename _NodeGenerator>
873 std::pair<iterator, bool>
874 _M_insert(_Arg&&, const _NodeGenerator&, true_type __uks);
875
876 template<typename _Arg, typename _NodeGenerator>
877 iterator
878 _M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
879 false_type __uks)
880 {
881 return _M_insert(cend(), std::forward<_Arg>(__arg), __node_gen,
882 __uks);
883 }
884
885 // Insert with hint, not used when keys are unique.
886 template<typename _Arg, typename _NodeGenerator>
887 iterator
888 _M_insert(const_iterator, _Arg&& __arg,
889 const _NodeGenerator& __node_gen, true_type __uks)
890 {
891 return
892 _M_insert(std::forward<_Arg>(__arg), __node_gen, __uks).first;
893 }
894
895 // Insert with hint when keys are not unique.
896 template<typename _Arg, typename _NodeGenerator>
897 iterator
898 _M_insert(const_iterator, _Arg&&,
899 const _NodeGenerator&, false_type __uks);
900
901 size_type
902 _M_erase(true_type __uks, const key_type&);
903
904 size_type
905 _M_erase(false_type __uks, const key_type&);
906
907 iterator
908 _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);
909
910 public:
911 // Emplace
912 template<typename... _Args>
913 __ireturn_type
914 emplace(_Args&&... __args)
915 { return _M_emplace(__unique_keys{}, std::forward<_Args>(__args)...); }
916
917 template<typename... _Args>
918 iterator
919 emplace_hint(const_iterator __hint, _Args&&... __args)
920 {
921 return _M_emplace(__hint, __unique_keys{},
922 std::forward<_Args>(__args)...);
923 }
924
925 // Insert member functions via inheritance.
926
927 // Erase
928 iterator
929 erase(const_iterator);
930
931 // LWG 2059.
932 iterator
933 erase(iterator __it)
934 { return erase(const_iterator(__it)); }
935
936 size_type
937 erase(const key_type& __k)
938 { return _M_erase(__unique_keys{}, __k); }
939
940 iterator
941 erase(const_iterator, const_iterator);
942
943 void
944 clear() noexcept;
945
946 // Set number of buckets keeping it appropriate for container's number
947 // of elements.
948 void rehash(size_type __bkt_count);
949
950 // DR 1189.
951 // reserve, if present, comes from _Rehash_base.
952
953#if __cplusplus > 201402L
954 /// Re-insert an extracted node into a container with unique keys.
955 insert_return_type
956 _M_reinsert_node(node_type&& __nh)
957 {
958 insert_return_type __ret;
959 if (__nh.empty())
960 __ret.position = end();
961 else
962 {
963 __glibcxx_assert(get_allocator() == __nh.get_allocator());
964
965 const key_type& __k = __nh._M_key();
966 __hash_code __code = this->_M_hash_code(__k);
967 size_type __bkt = _M_bucket_index(__code);
968 if (__node_ptr __n = _M_find_node(__bkt, __k, __code))
969 {
970 __ret.node = std::move(__nh);
971 __ret.position = iterator(__n);
972 __ret.inserted = false;
973 }
974 else
975 {
976 __ret.position
977 = _M_insert_unique_node(__bkt, __code, __nh._M_ptr);
978 __nh._M_ptr = nullptr;
979 __ret.inserted = true;
980 }
981 }
982 return __ret;
983 }
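      // Illustration only, via the public C++17 node-handle API which
      // forwards to the members here:
      //
      //   std::unordered_map<int, int> __m{{1, 1}};
      //   auto __nh = __m.extract(1);    // node_type leaves the container
      //   __m.insert(std::move(__nh));   // re-inserted via _M_reinsert_node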
984
985 /// Re-insert an extracted node into a container with equivalent keys.
986 iterator
987 _M_reinsert_node_multi(const_iterator __hint, node_type&& __nh)
988 {
989 if (__nh.empty())
990 return end();
991
992 __glibcxx_assert(get_allocator() == __nh.get_allocator());
993
994 const key_type& __k = __nh._M_key();
995 auto __code = this->_M_hash_code(__k);
996 auto __ret
997 = _M_insert_multi_node(__hint._M_cur, __code, __nh._M_ptr);
998 __nh._M_ptr = nullptr;
999 return __ret;
1000 }
1001
1002 private:
1003 node_type
1004 _M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
1005 {
1006 __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
1007 if (__prev_n == _M_buckets[__bkt])
1008 _M_remove_bucket_begin(__bkt, __n->_M_next(),
1009 __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
1010 else if (__n->_M_nxt)
1011 {
1012 size_type __next_bkt = _M_bucket_index(*__n->_M_next());
1013 if (__next_bkt != __bkt)
1014 _M_buckets[__next_bkt] = __prev_n;
1015 }
1016
1017 __prev_n->_M_nxt = __n->_M_nxt;
1018 __n->_M_nxt = nullptr;
1019 --_M_element_count;
1020 return { __n, this->_M_node_allocator() };
1021 }
1022
1023 public:
1024 // Extract a node.
1025 node_type
1026 extract(const_iterator __pos)
1027 {
1028 size_t __bkt = _M_bucket_index(*__pos._M_cur);
1029 return _M_extract_node(__bkt,
1030 _M_get_previous_node(__bkt, __pos._M_cur));
1031 }
1032
1033 /// Extract a node.
1034 node_type
1035 extract(const _Key& __k)
1036 {
1037 node_type __nh;
1038 __hash_code __code = this->_M_hash_code(__k);
1039 std::size_t __bkt = _M_bucket_index(__code);
1040 if (__node_base_ptr __prev_node = _M_find_before_node(__bkt, __k, __code))
1041 __nh = _M_extract_node(__bkt, __prev_node);
1042 return __nh;
1043 }
1044
1045 /// Merge from a compatible container into one with unique keys.
1046 template<typename _Compatible_Hashtable>
1047 void
1048 _M_merge_unique(_Compatible_Hashtable& __src) noexcept
1049 {
1050 static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
1051 node_type>, "Node types are compatible");
1052 __glibcxx_assert(get_allocator() == __src.get_allocator());
1053
1054 auto __n_elt = __src.size();
1055 for (auto __i = __src.begin(), __end = __src.end(); __i != __end;)
1056 {
1057 auto __pos = __i++;
1058 const key_type& __k = _ExtractKey{}(*__pos);
1059 __hash_code __code = this->_M_hash_code(__k);
1060 size_type __bkt = _M_bucket_index(__code);
1061 if (_M_find_node(__bkt, __k, __code) == nullptr)
1062 {
1063 auto __nh = __src.extract(__pos);
1064 _M_insert_unique_node(__bkt, __code, __nh._M_ptr, __n_elt);
1065 __nh._M_ptr = nullptr;
1066 __n_elt = 1;
1067 }
1068 else if (__n_elt != 1)
1069 --__n_elt;
1070 }
1071 }
1072
1073 /// Merge from a compatible container into one with equivalent keys.
1074 template<typename _Compatible_Hashtable>
1075 void
1076 _M_merge_multi(_Compatible_Hashtable& __src) noexcept
1077 {
1078 static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
1079 node_type>, "Node types are compatible");
1080 __glibcxx_assert(get_allocator() == __src.get_allocator());
1081
1082 this->reserve(size() + __src.size());
1083 for (auto __i = __src.begin(), __end = __src.end(); __i != __end;)
1084 _M_reinsert_node_multi(cend(), __src.extract(__i++));
1085 }
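      // Illustration only: the public merge members forward to the two
      // helpers above, e.g.
      //
      //   std::unordered_multiset<int> __a{1, 2}, __b{2, 3};
      //   __a.merge(__b);   // splices __b's nodes via _M_merge_multi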
1086#endif // C++17
1087
1088 private:
1089 // Helper rehash method used when keys are unique.
1090 void _M_rehash_aux(size_type __bkt_count, true_type __uks);
1091
1092 // Helper rehash method used when keys can be non-unique.
1093 void _M_rehash_aux(size_type __bkt_count, false_type __uks);
1094
1095 // Unconditionally change size of bucket array to n, restore
1096 // hash policy state to __state on exception.
1097 void _M_rehash(size_type __bkt_count, const __rehash_state& __state);
1098 };
1099
1100
1101 // Definitions of class template _Hashtable's out-of-line member functions.
1102 template<typename _Key, typename _Value, typename _Alloc,
1103 typename _ExtractKey, typename _Equal,
1104 typename _Hash, typename _RangeHash, typename _Unused,
1105 typename _RehashPolicy, typename _Traits>
1106 auto
1107 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1108 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1109 _M_bucket_begin(size_type __bkt) const
1110 -> __node_ptr
1111 {
1112 __node_base_ptr __n = _M_buckets[__bkt];
1113 return __n ? static_cast<__node_ptr>(__n->_M_nxt) : nullptr;
1114 }
1115
1116 template<typename _Key, typename _Value, typename _Alloc,
1117 typename _ExtractKey, typename _Equal,
1118 typename _Hash, typename _RangeHash, typename _Unused,
1119 typename _RehashPolicy, typename _Traits>
1120 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1121 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1122 _Hashtable(size_type __bkt_count_hint,
1123 const _Hash& __h, const _Equal& __eq, const allocator_type& __a)
1124 : _Hashtable(__h, __eq, __a)
1125 {
1126 auto __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count_hint);
1127 if (__bkt_count > _M_bucket_count)
1128 {
1129 _M_buckets = _M_allocate_buckets(__bkt_count);
1130 _M_bucket_count = __bkt_count;
1131 }
1132 }
1133
1134 template<typename _Key, typename _Value, typename _Alloc,
1135 typename _ExtractKey, typename _Equal,
1136 typename _Hash, typename _RangeHash, typename _Unused,
1137 typename _RehashPolicy, typename _Traits>
1138 template<typename _InputIterator>
1139 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1140 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1141 _Hashtable(_InputIterator __f, _InputIterator __l,
1142 size_type __bkt_count_hint,
1143 const _Hash& __h, const _Equal& __eq,
1144 const allocator_type& __a, true_type /* __uks */)
1145 : _Hashtable(__bkt_count_hint, __h, __eq, __a)
1146 {
1147 for (; __f != __l; ++__f)
1148 this->insert(*__f);
1149 }
1150
1151 template<typename _Key, typename _Value, typename _Alloc,
1152 typename _ExtractKey, typename _Equal,
1153 typename _Hash, typename _RangeHash, typename _Unused,
1154 typename _RehashPolicy, typename _Traits>
1155 template<typename _InputIterator>
1156 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1157 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1158 _Hashtable(_InputIterator __f, _InputIterator __l,
1159 size_type __bkt_count_hint,
1160 const _Hash& __h, const _Equal& __eq,
1161 const allocator_type& __a, false_type /* __uks */)
1162 : _Hashtable(__h, __eq, __a)
1163 {
1164 auto __nb_elems = __detail::__distance_fw(__f, __l);
1165 auto __bkt_count =
1166 _M_rehash_policy._M_next_bkt(
1167 std::max(_M_rehash_policy._M_bkt_for_elements(__nb_elems),
1168 __bkt_count_hint));
1169
1170 if (__bkt_count > _M_bucket_count)
1171 {
1172 _M_buckets = _M_allocate_buckets(__bkt_count);
1173 _M_bucket_count = __bkt_count;
1174 }
1175
1176 for (; __f != __l; ++__f)
1177 this->insert(*__f);
1178 }
1179
1180 template<typename _Key, typename _Value, typename _Alloc,
1181 typename _ExtractKey, typename _Equal,
1182 typename _Hash, typename _RangeHash, typename _Unused,
1183 typename _RehashPolicy, typename _Traits>
1184 auto
1185 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1186 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1187 operator=(const _Hashtable& __ht)
1188 -> _Hashtable&
1189 {
1190 if (&__ht == this)
1191 return *this;
1192
1193 if (__node_alloc_traits::_S_propagate_on_copy_assign())
1194 {
1195 auto& __this_alloc = this->_M_node_allocator();
1196 auto& __that_alloc = __ht._M_node_allocator();
1197 if (!__node_alloc_traits::_S_always_equal()
1198 && __this_alloc != __that_alloc)
1199 {
1200 // Replacement allocator cannot free existing storage.
1201 this->_M_deallocate_nodes(_M_begin());
1202 _M_before_begin._M_nxt = nullptr;
1203 _M_deallocate_buckets();
1204 _M_buckets = nullptr;
1205 std::__alloc_on_copy(__this_alloc, __that_alloc);
1206 __hashtable_base::operator=(__ht);
1207 _M_bucket_count = __ht._M_bucket_count;
1208 _M_element_count = __ht._M_element_count;
1209 _M_rehash_policy = __ht._M_rehash_policy;
1210 __alloc_node_gen_t __alloc_node_gen(*this);
1211 __try
1212 {
1213 _M_assign(__ht, __alloc_node_gen);
1214 }
1215 __catch(...)
1216 {
1217 // _M_assign took care of deallocating all memory. Now we
1218 // must make sure this instance remains in a usable state.
1219 _M_reset();
1220 __throw_exception_again;
1221 }
1222 return *this;
1223 }
1224 std::__alloc_on_copy(__this_alloc, __that_alloc);
1225 }
1226
1227 // Reuse allocated buckets and nodes.
1228 _M_assign_elements(__ht);
1229 return *this;
1230 }
1231
1232 template<typename _Key, typename _Value, typename _Alloc,
1233 typename _ExtractKey, typename _Equal,
1234 typename _Hash, typename _RangeHash, typename _Unused,
1235 typename _RehashPolicy, typename _Traits>
1236 template<typename _Ht>
1237 void
1238 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1239 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1240 _M_assign_elements(_Ht&& __ht)
1241 {
1242 __buckets_ptr __former_buckets = nullptr;
1243 std::size_t __former_bucket_count = _M_bucket_count;
1244 const __rehash_state& __former_state = _M_rehash_policy._M_state();
1245
1246 if (_M_bucket_count != __ht._M_bucket_count)
1247 {
1248 __former_buckets = _M_buckets;
	  _M_buckets = _M_allocate_buckets(__ht._M_bucket_count);
1250 _M_bucket_count = __ht._M_bucket_count;
1251 }
1252 else
1253 __builtin_memset(_M_buckets, 0,
1254 _M_bucket_count * sizeof(__node_base_ptr));
1255
1256 __try
1257 {
1258 __hashtable_base::operator=(std::forward<_Ht>(__ht));
1259 _M_element_count = __ht._M_element_count;
1260 _M_rehash_policy = __ht._M_rehash_policy;
1261 __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
1262 _M_before_begin._M_nxt = nullptr;
1263 _M_assign(std::forward<_Ht>(__ht), __roan);
1264 if (__former_buckets)
1265 _M_deallocate_buckets(__former_buckets, __former_bucket_count);
1266 }
1267 __catch(...)
1268 {
1269 if (__former_buckets)
1270 {
1271 // Restore previous buckets.
1272 _M_deallocate_buckets();
1273 _M_rehash_policy._M_reset(__former_state);
1274 _M_buckets = __former_buckets;
1275 _M_bucket_count = __former_bucket_count;
1276 }
1277 __builtin_memset(_M_buckets, 0,
1278 _M_bucket_count * sizeof(__node_base_ptr));
1279 __throw_exception_again;
1280 }
1281 }
1282
1283 template<typename _Key, typename _Value, typename _Alloc,
1284 typename _ExtractKey, typename _Equal,
1285 typename _Hash, typename _RangeHash, typename _Unused,
1286 typename _RehashPolicy, typename _Traits>
1287 template<typename _Ht, typename _NodeGenerator>
1288 void
1289 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1290 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1291 _M_assign(_Ht&& __ht, const _NodeGenerator& __node_gen)
1292 {
1293 __buckets_ptr __buckets = nullptr;
1294 if (!_M_buckets)
	_M_buckets = __buckets = _M_allocate_buckets(_M_bucket_count);
1296
1297 __try
1298 {
1299 if (!__ht._M_before_begin._M_nxt)
1300 return;
1301
1302 // First deal with the special first node pointed to by
1303 // _M_before_begin.
1304 __node_ptr __ht_n = __ht._M_begin();
1305 __node_ptr __this_n
1306 = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
1307 this->_M_copy_code(*__this_n, *__ht_n);
1308 _M_update_bbegin(__this_n);
1309
1310 // Then deal with other nodes.
1311 __node_ptr __prev_n = __this_n;
1312 for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
1313 {
1314 __this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
1315 __prev_n->_M_nxt = __this_n;
1316 this->_M_copy_code(*__this_n, *__ht_n);
1317 size_type __bkt = _M_bucket_index(*__this_n);
1318 if (!_M_buckets[__bkt])
1319 _M_buckets[__bkt] = __prev_n;
1320 __prev_n = __this_n;
1321 }
1322 }
1323 __catch(...)
1324 {
1325 clear();
1326 if (__buckets)
1327 _M_deallocate_buckets();
1328 __throw_exception_again;
1329 }
1330 }
1331
1332 template<typename _Key, typename _Value, typename _Alloc,
1333 typename _ExtractKey, typename _Equal,
1334 typename _Hash, typename _RangeHash, typename _Unused,
1335 typename _RehashPolicy, typename _Traits>
1336 void
1337 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1338 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1339 _M_reset() noexcept
1340 {
1341 _M_rehash_policy._M_reset();
1342 _M_bucket_count = 1;
1343 _M_single_bucket = nullptr;
1344 _M_buckets = &_M_single_bucket;
1345 _M_before_begin._M_nxt = nullptr;
1346 _M_element_count = 0;
1347 }
1348
1349 template<typename _Key, typename _Value, typename _Alloc,
1350 typename _ExtractKey, typename _Equal,
1351 typename _Hash, typename _RangeHash, typename _Unused,
1352 typename _RehashPolicy, typename _Traits>
1353 void
1354 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1355 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1356 _M_move_assign(_Hashtable&& __ht, true_type)
1357 {
1358 if (__builtin_expect(std::__addressof(__ht) == this, false))
1359 return;
1360
1361 this->_M_deallocate_nodes(_M_begin());
1362 _M_deallocate_buckets();
1363 __hashtable_base::operator=(std::move(__ht));
1364 _M_rehash_policy = __ht._M_rehash_policy;
1365 if (!__ht._M_uses_single_bucket())
1366 _M_buckets = __ht._M_buckets;
1367 else
1368 {
1369 _M_buckets = &_M_single_bucket;
1370 _M_single_bucket = __ht._M_single_bucket;
1371 }
1372
1373 _M_bucket_count = __ht._M_bucket_count;
1374 _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
1375 _M_element_count = __ht._M_element_count;
1376 std::__alloc_on_move(this->_M_node_allocator(), __ht._M_node_allocator());
1377
1378 // Fix bucket containing the _M_before_begin pointer that can't be moved.
1379 _M_update_bbegin();
1380 __ht._M_reset();
1381 }
1382
1383 template<typename _Key, typename _Value, typename _Alloc,
1384 typename _ExtractKey, typename _Equal,
1385 typename _Hash, typename _RangeHash, typename _Unused,
1386 typename _RehashPolicy, typename _Traits>
1387 void
1388 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1389 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1390 _M_move_assign(_Hashtable&& __ht, false_type)
1391 {
1392 if (__ht._M_node_allocator() == this->_M_node_allocator())
1393 _M_move_assign(std::move(__ht), true_type{});
1394 else
1395 {
	  // Can't move the memory; move the elements instead.
1397 _M_assign_elements(std::move(__ht));
1398 __ht.clear();
1399 }
1400 }
1401
1402 template<typename _Key, typename _Value, typename _Alloc,
1403 typename _ExtractKey, typename _Equal,
1404 typename _Hash, typename _RangeHash, typename _Unused,
1405 typename _RehashPolicy, typename _Traits>
1406 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1407 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1408 _Hashtable(const _Hashtable& __ht)
1409 : __hashtable_base(__ht),
1410 __map_base(__ht),
1411 __rehash_base(__ht),
1412 __hashtable_alloc(
1413 __node_alloc_traits::_S_select_on_copy(__ht._M_node_allocator())),
1414 __enable_default_ctor(__ht),
1415 _M_buckets(nullptr),
1416 _M_bucket_count(__ht._M_bucket_count),
1417 _M_element_count(__ht._M_element_count),
1418 _M_rehash_policy(__ht._M_rehash_policy)
1419 {
1420 __alloc_node_gen_t __alloc_node_gen(*this);
1421 _M_assign(__ht, __alloc_node_gen);
1422 }
1423
1424 template<typename _Key, typename _Value, typename _Alloc,
1425 typename _ExtractKey, typename _Equal,
1426 typename _Hash, typename _RangeHash, typename _Unused,
1427 typename _RehashPolicy, typename _Traits>
1428 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1429 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1430 _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
1431 true_type /* alloc always equal */)
1432 noexcept(_S_nothrow_move())
1433 : __hashtable_base(__ht),
1434 __map_base(__ht),
1435 __rehash_base(__ht),
1436 __hashtable_alloc(std::move(__a)),
1437 __enable_default_ctor(__ht),
1438 _M_buckets(__ht._M_buckets),
1439 _M_bucket_count(__ht._M_bucket_count),
1440 _M_before_begin(__ht._M_before_begin._M_nxt),
1441 _M_element_count(__ht._M_element_count),
1442 _M_rehash_policy(__ht._M_rehash_policy)
1443 {
1444 // Update buckets if __ht is using its single bucket.
1445 if (__ht._M_uses_single_bucket())
1446 {
1447 _M_buckets = &_M_single_bucket;
1448 _M_single_bucket = __ht._M_single_bucket;
1449 }
1450
1451 // Fix bucket containing the _M_before_begin pointer that can't be moved.
1452 _M_update_bbegin();
1453
1454 __ht._M_reset();
1455 }
1456
1457 template<typename _Key, typename _Value, typename _Alloc,
1458 typename _ExtractKey, typename _Equal,
1459 typename _Hash, typename _RangeHash, typename _Unused,
1460 typename _RehashPolicy, typename _Traits>
1461 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1462 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1463 _Hashtable(const _Hashtable& __ht, const allocator_type& __a)
1464 : __hashtable_base(__ht),
1465 __map_base(__ht),
1466 __rehash_base(__ht),
1467 __hashtable_alloc(__node_alloc_type(__a)),
1468 __enable_default_ctor(__ht),
1469 _M_buckets(),
1470 _M_bucket_count(__ht._M_bucket_count),
1471 _M_element_count(__ht._M_element_count),
1472 _M_rehash_policy(__ht._M_rehash_policy)
1473 {
1474 __alloc_node_gen_t __alloc_node_gen(*this);
1475 _M_assign(__ht, __alloc_node_gen);
1476 }
1477
1478 template<typename _Key, typename _Value, typename _Alloc,
1479 typename _ExtractKey, typename _Equal,
1480 typename _Hash, typename _RangeHash, typename _Unused,
1481 typename _RehashPolicy, typename _Traits>
1482 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1483 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1484 _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
1485 false_type /* alloc always equal */)
1486 : __hashtable_base(__ht),
1487 __map_base(__ht),
1488 __rehash_base(__ht),
1489 __hashtable_alloc(std::move(__a)),
1490 __enable_default_ctor(__ht),
1491 _M_buckets(nullptr),
1492 _M_bucket_count(__ht._M_bucket_count),
1493 _M_element_count(__ht._M_element_count),
1494 _M_rehash_policy(__ht._M_rehash_policy)
1495 {
1496 if (__ht._M_node_allocator() == this->_M_node_allocator())
1497 {
1498 if (__ht._M_uses_single_bucket())
1499 {
1500 _M_buckets = &_M_single_bucket;
1501 _M_single_bucket = __ht._M_single_bucket;
1502 }
1503 else
1504 _M_buckets = __ht._M_buckets;
1505
1506 // Fix bucket containing the _M_before_begin pointer that can't be
1507 // moved.
1508 _M_update_bbegin(__ht._M_begin());
1509
1510 __ht._M_reset();
1511 }
1512 else
1513 {
1514 __alloc_node_gen_t __alloc_gen(*this);
1515
1516 using _Fwd_Ht = typename
1517 conditional<__move_if_noexcept_cond<value_type>::value,
1518 const _Hashtable&, _Hashtable&&>::type;
1519 _M_assign(std::forward<_Fwd_Ht>(__ht), __alloc_gen);
1520 __ht.clear();
1521 }
1522 }
1523
1524 template<typename _Key, typename _Value, typename _Alloc,
1525 typename _ExtractKey, typename _Equal,
1526 typename _Hash, typename _RangeHash, typename _Unused,
1527 typename _RehashPolicy, typename _Traits>
1528 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1529 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1530 ~_Hashtable() noexcept
1531 {
1532 clear();
1533 _M_deallocate_buckets();
1534 }
1535
1536 template<typename _Key, typename _Value, typename _Alloc,
1537 typename _ExtractKey, typename _Equal,
1538 typename _Hash, typename _RangeHash, typename _Unused,
1539 typename _RehashPolicy, typename _Traits>
1540 void
1541 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1542 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1543 swap(_Hashtable& __x)
1544 noexcept(__and_<__is_nothrow_swappable<_Hash>,
1545 __is_nothrow_swappable<_Equal>>::value)
1546 {
1547 // The only base class with member variables is hash_code_base.
1548 // We define _Hash_code_base::_M_swap because different
1549 // specializations have different members.
1550 this->_M_swap(__x);
1551
1552 std::__alloc_on_swap(this->_M_node_allocator(), __x._M_node_allocator());
1553 std::swap(_M_rehash_policy, __x._M_rehash_policy);
1554
1555 // Deal properly with potentially moved instances.
1556 if (this->_M_uses_single_bucket())
1557 {
1558 if (!__x._M_uses_single_bucket())
1559 {
1560 _M_buckets = __x._M_buckets;
1561 __x._M_buckets = &__x._M_single_bucket;
1562 }
1563 }
1564 else if (__x._M_uses_single_bucket())
1565 {
1566 __x._M_buckets = _M_buckets;
1567 _M_buckets = &_M_single_bucket;
1568 }
1569 else
1570 std::swap(_M_buckets, __x._M_buckets);
1571
1572 std::swap(_M_bucket_count, __x._M_bucket_count);
1573 std::swap(_M_before_begin._M_nxt, __x._M_before_begin._M_nxt);
1574 std::swap(_M_element_count, __x._M_element_count);
1575 std::swap(_M_single_bucket, __x._M_single_bucket);
1576
1577 // Fix buckets containing the _M_before_begin pointers that can't be
1578 // swapped.
1579 _M_update_bbegin();
1580 __x._M_update_bbegin();
1581 }
1582
1583 template<typename _Key, typename _Value, typename _Alloc,
1584 typename _ExtractKey, typename _Equal,
1585 typename _Hash, typename _RangeHash, typename _Unused,
1586 typename _RehashPolicy, typename _Traits>
1587 auto
1588 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1589 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1590 find(const key_type& __k)
1591 -> iterator
1592 {
1593 __hash_code __code = this->_M_hash_code(__k);
1594 std::size_t __bkt = _M_bucket_index(__code);
      return iterator(_M_find_node(__bkt, __k, __code));
1596 }
1597
1598 template<typename _Key, typename _Value, typename _Alloc,
1599 typename _ExtractKey, typename _Equal,
1600 typename _Hash, typename _RangeHash, typename _Unused,
1601 typename _RehashPolicy, typename _Traits>
1602 auto
1603 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1604 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1605 find(const key_type& __k) const
1606 -> const_iterator
1607 {
1608 __hash_code __code = this->_M_hash_code(__k);
1609 std::size_t __bkt = _M_bucket_index(__code);
      return const_iterator(_M_find_node(__bkt, __k, __code));
1611 }
1612
1613#if __cplusplus > 201703L
1614 template<typename _Key, typename _Value, typename _Alloc,
1615 typename _ExtractKey, typename _Equal,
1616 typename _Hash, typename _RangeHash, typename _Unused,
1617 typename _RehashPolicy, typename _Traits>
1618 template<typename _Kt, typename, typename>
1619 auto
1620 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1621 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1622 _M_find_tr(const _Kt& __k)
1623 -> iterator
1624 {
1625 __hash_code __code = this->_M_hash_code_tr(__k);
1626 std::size_t __bkt = _M_bucket_index(__code);
1627 return iterator(_M_find_node_tr(__bkt, __k, __code));
1628 }
1629
1630 template<typename _Key, typename _Value, typename _Alloc,
1631 typename _ExtractKey, typename _Equal,
1632 typename _Hash, typename _RangeHash, typename _Unused,
1633 typename _RehashPolicy, typename _Traits>
1634 template<typename _Kt, typename, typename>
1635 auto
1636 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1637 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1638 _M_find_tr(const _Kt& __k) const
1639 -> const_iterator
1640 {
1641 __hash_code __code = this->_M_hash_code_tr(__k);
1642 std::size_t __bkt = _M_bucket_index(__code);
1643 return const_iterator(_M_find_node_tr(__bkt, __k, __code));
1644 }
1645#endif
1646
1647 template<typename _Key, typename _Value, typename _Alloc,
1648 typename _ExtractKey, typename _Equal,
1649 typename _Hash, typename _RangeHash, typename _Unused,
1650 typename _RehashPolicy, typename _Traits>
1651 auto
1652 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1653 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1654 count(const key_type& __k) const
1655 -> size_type
1656 {
1657 auto __it = find(__k);
1658 if (!__it._M_cur)
1659 return 0;
1660
1661 if (__unique_keys::value)
1662 return 1;
1663
      // All equivalent values are next to each other; if we find a
      // non-equivalent value after an equivalent one, we won't find any
      // more equivalent values.
1667 size_type __result = 1;
1668 for (auto __ref = __it++;
1669 __it._M_cur && this->_M_node_equals(*__ref._M_cur, *__it._M_cur);
1670 ++__it)
1671 ++__result;
1672
1673 return __result;
1674 }
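// Illustration only (not part of the library): because equivalent keys are
// stored adjacently, count simply walks forward from the first match, e.g.
//   std::unordered_multiset<int> s{1, 2, 2, 2, 3};
//   assert(s.count(2) == 3);  // three adjacent nodes compare equal to 2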
1675
1676#if __cplusplus > 201703L
1677 template<typename _Key, typename _Value, typename _Alloc,
1678 typename _ExtractKey, typename _Equal,
1679 typename _Hash, typename _RangeHash, typename _Unused,
1680 typename _RehashPolicy, typename _Traits>
1681 template<typename _Kt, typename, typename>
1682 auto
1683 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1684 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1685 _M_count_tr(const _Kt& __k) const
1686 -> size_type
1687 {
1688 __hash_code __code = this->_M_hash_code_tr(__k);
1689 std::size_t __bkt = _M_bucket_index(__code);
1690 auto __n = _M_find_node_tr(__bkt, __k, __code);
1691 if (!__n)
1692 return 0;
1693
1694 // All equivalent values are next to each other; once we find a
1695 // non-equivalent value after an equivalent one, no further
1696 // equivalent values can follow.
1697 iterator __it(__n);
1698 size_type __result = 1;
1699 for (++__it;
1700 __it._M_cur && this->_M_equals_tr(__k, __code, *__it._M_cur);
1701 ++__it)
1702 ++__result;
1703
1704 return __result;
1705 }
1706#endif
1707
1708 template<typename _Key, typename _Value, typename _Alloc,
1709 typename _ExtractKey, typename _Equal,
1710 typename _Hash, typename _RangeHash, typename _Unused,
1711 typename _RehashPolicy, typename _Traits>
1712 auto
1713 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1714 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1715 equal_range(const key_type& __k)
1716 -> pair<iterator, iterator>
1717 {
1718 auto __ite = find(__k);
1719 if (!__ite._M_cur)
1720 return { __ite, __ite };
1721
1722 auto __beg = __ite++;
1723 if (__unique_keys::value)
1724 return { __beg, __ite };
1725
1726 // All equivalent values are next to each other; once we find a
1727 // non-equivalent value after an equivalent one, no further
1728 // equivalent values can follow.
1729 while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
1730 ++__ite;
1731
1732 return { __beg, __ite };
1733 }
1734
1735 template<typename _Key, typename _Value, typename _Alloc,
1736 typename _ExtractKey, typename _Equal,
1737 typename _Hash, typename _RangeHash, typename _Unused,
1738 typename _RehashPolicy, typename _Traits>
1739 auto
1740 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1741 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1742 equal_range(const key_type& __k) const
1743 -> pair<const_iterator, const_iterator>
1744 {
1745 auto __ite = find(__k);
1746 if (!__ite._M_cur)
1747 return { __ite, __ite };
1748
1749 auto __beg = __ite++;
1750 if (__unique_keys::value)
1751 return { __beg, __ite };
1752
1753 // All equivalent values are next to each other; once we find a
1754 // non-equivalent value after an equivalent one, no further
1755 // equivalent values can follow.
1756 while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
1757 ++__ite;
1758
1759 return { __beg, __ite };
1760 }
1761
1762#if __cplusplus > 201703L
1763 template<typename _Key, typename _Value, typename _Alloc,
1764 typename _ExtractKey, typename _Equal,
1765 typename _Hash, typename _RangeHash, typename _Unused,
1766 typename _RehashPolicy, typename _Traits>
1767 template<typename _Kt, typename, typename>
1768 auto
1769 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1770 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1771 _M_equal_range_tr(const _Kt& __k)
1772 -> pair<iterator, iterator>
1773 {
1774 __hash_code __code = this->_M_hash_code_tr(__k);
1775 std::size_t __bkt = _M_bucket_index(__code);
1776 auto __n = _M_find_node_tr(__bkt, __k, __code);
1777 iterator __ite(__n);
1778 if (!__n)
1779 return { __ite, __ite };
1780
1781 // All equivalent values are next to each other; once we find a
1782 // non-equivalent value after an equivalent one, no further
1783 // equivalent values can follow.
1784 auto __beg = __ite++;
1785 while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
1786 ++__ite;
1787
1788 return { __beg, __ite };
1789 }
1790
1791 template<typename _Key, typename _Value, typename _Alloc,
1792 typename _ExtractKey, typename _Equal,
1793 typename _Hash, typename _RangeHash, typename _Unused,
1794 typename _RehashPolicy, typename _Traits>
1795 template<typename _Kt, typename, typename>
1796 auto
1797 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1798 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1799 _M_equal_range_tr(const _Kt& __k) const
1800 -> pair<const_iterator, const_iterator>
1801 {
1802 __hash_code __code = this->_M_hash_code_tr(__k);
1803 std::size_t __bkt = _M_bucket_index(__code);
1804 auto __n = _M_find_node_tr(__bkt, __k, __code);
1805 const_iterator __ite(__n);
1806 if (!__n)
1807 return { __ite, __ite };
1808
1809 // All equivalent values are next to each other; once we find a
1810 // non-equivalent value after an equivalent one, no further
1811 // equivalent values can follow.
1812 auto __beg = __ite++;
1813 while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
1814 ++__ite;
1815
1816 return { __beg, __ite };
1817 }
1818#endif
1819
1820 // Find the node before the one whose key compares equal to k in the bucket
1821 // bkt. Return nullptr if no node is found.
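// Returning the node *before* the match lets callers relink the list in
// O(1); the loop below stops as soon as the next node falls in another
// bucket, since all nodes of a bucket are contiguous in the forward list.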
1822 template<typename _Key, typename _Value, typename _Alloc,
1823 typename _ExtractKey, typename _Equal,
1824 typename _Hash, typename _RangeHash, typename _Unused,
1825 typename _RehashPolicy, typename _Traits>
1826 auto
1827 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1828 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1829 _M_find_before_node(size_type __bkt, const key_type& __k,
1830 __hash_code __code) const
1831 -> __node_base_ptr
1832 {
1833 __node_base_ptr __prev_p = _M_buckets[__bkt];
1834 if (!__prev_p)
1835 return nullptr;
1836
1837 for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
1838 __p = __p->_M_next())
1839 {
1840 if (this->_M_equals(__k, __code, *__p))
1841 return __prev_p;
1842
1843 if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
1844 break;
1845 __prev_p = __p;
1846 }
1847
1848 return nullptr;
1849 }
1850
1851 template<typename _Key, typename _Value, typename _Alloc,
1852 typename _ExtractKey, typename _Equal,
1853 typename _Hash, typename _RangeHash, typename _Unused,
1854 typename _RehashPolicy, typename _Traits>
1855 template<typename _Kt>
1856 auto
1857 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1858 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1859 _M_find_before_node_tr(size_type __bkt, const _Kt& __k,
1860 __hash_code __code) const
1861 -> __node_base_ptr
1862 {
1863 __node_base_ptr __prev_p = _M_buckets[__bkt];
1864 if (!__prev_p)
1865 return nullptr;
1866
1867 for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
1868 __p = __p->_M_next())
1869 {
1870 if (this->_M_equals_tr(__k, __code, *__p))
1871 return __prev_p;
1872
1873 if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
1874 break;
1875 __prev_p = __p;
1876 }
1877
1878 return nullptr;
1879 }
1880
1881 template<typename _Key, typename _Value, typename _Alloc,
1882 typename _ExtractKey, typename _Equal,
1883 typename _Hash, typename _RangeHash, typename _Unused,
1884 typename _RehashPolicy, typename _Traits>
1885 void
1886 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1887 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1888 _M_insert_bucket_begin(size_type __bkt, __node_ptr __node)
1889 {
1890 if (_M_buckets[__bkt])
1891 {
1892 // The bucket is not empty; just insert the new node right
1893 // after the bucket's before-begin node.
1894 __node->_M_nxt = _M_buckets[__bkt]->_M_nxt;
1895 _M_buckets[__bkt]->_M_nxt = __node;
1896 }
1897 else
1898 {
1899 // The bucket is empty: insert the new node at the beginning
1900 // of the singly-linked list and make the bucket point to
1901 // _M_before_begin.
1902 __node->_M_nxt = _M_before_begin._M_nxt;
1903 _M_before_begin._M_nxt = __node;
1904
1905 if (__node->_M_nxt)
1906 // We must update the former begin bucket, which is still
1907 // pointing to _M_before_begin.
1908 _M_buckets[_M_bucket_index(*__node->_M_next())] = __node;
1909
1910 _M_buckets[__bkt] = &_M_before_begin;
1911 }
1912 }
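// Illustration only: the whole container is one singly-linked list anchored
// at _M_before_begin, and _M_buckets[b] stores the node *before* the first
// node of bucket b (possibly &_M_before_begin itself).  For example:
//   _M_before_begin -> n0(bkt 2) -> n1(bkt 2) -> n2(bkt 0) -> null
//   _M_buckets[2] == &_M_before_begin,  _M_buckets[0] == n1
// This is what keeps per-node insertion and unlinking O(1).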
1913
1914 template<typename _Key, typename _Value, typename _Alloc,
1915 typename _ExtractKey, typename _Equal,
1916 typename _Hash, typename _RangeHash, typename _Unused,
1917 typename _RehashPolicy, typename _Traits>
1918 void
1919 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1920 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1921 _M_remove_bucket_begin(size_type __bkt, __node_ptr __next,
1922 size_type __next_bkt)
1923 {
1924 if (!__next || __next_bkt != __bkt)
1925 {
1926 // The bucket is now empty.
1927 // First, update the next bucket, if any.
1928 if (__next)
1929 _M_buckets[__next_bkt] = _M_buckets[__bkt];
1930
1931 // Second, update the before-begin node if necessary.
1932 if (&_M_before_begin == _M_buckets[__bkt])
1933 _M_before_begin._M_nxt = __next;
1934 _M_buckets[__bkt] = nullptr;
1935 }
1936 }
1937
1938 template<typename _Key, typename _Value, typename _Alloc,
1939 typename _ExtractKey, typename _Equal,
1940 typename _Hash, typename _RangeHash, typename _Unused,
1941 typename _RehashPolicy, typename _Traits>
1942 auto
1943 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1944 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1945 _M_get_previous_node(size_type __bkt, __node_ptr __n)
1946 -> __node_base_ptr
1947 {
1948 __node_base_ptr __prev_n = _M_buckets[__bkt];
1949 while (__prev_n->_M_nxt != __n)
1950 __prev_n = __prev_n->_M_nxt;
1951 return __prev_n;
1952 }
1953
1954 template<typename _Key, typename _Value, typename _Alloc,
1955 typename _ExtractKey, typename _Equal,
1956 typename _Hash, typename _RangeHash, typename _Unused,
1957 typename _RehashPolicy, typename _Traits>
1958 template<typename... _Args>
1959 auto
1960 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1961 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1962 _M_emplace(true_type /* __uks */, _Args&&... __args)
1963 -> pair<iterator, bool>
1964 {
1965 // First build the node to get access to the hash code
1966 _Scoped_node __node { this, std::forward<_Args>(__args)... };
1967 const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
1968 __hash_code __code = this->_M_hash_code(__k);
1969 size_type __bkt = _M_bucket_index(__code);
1970 if (__node_ptr __p = _M_find_node(__bkt, __k, __code))
1971 // There is already an equivalent node, no insertion
1972 return std::make_pair(iterator(__p), false);
1973
1974 // Insert the node
1975 auto __pos = _M_insert_unique_node(__bkt, __code, __node._M_node);
1976 __node._M_node = nullptr;
1977 return { __pos, true };
1978 }
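// Illustration only (user-level view of the unique-key path above):
//   std::unordered_set<std::string> s;
//   auto [it1, inserted] = s.emplace("key");  // inserted == true
//   auto [it2, again]    = s.emplace("key");  // again == false, it2 == it1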
1979
1980 template<typename _Key, typename _Value, typename _Alloc,
1981 typename _ExtractKey, typename _Equal,
1982 typename _Hash, typename _RangeHash, typename _Unused,
1983 typename _RehashPolicy, typename _Traits>
1984 template<typename... _Args>
1985 auto
1986 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1987 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1988 _M_emplace(const_iterator __hint, false_type /* __uks */,
1989 _Args&&... __args)
1990 -> iterator
1991 {
1992 // First build the node to get its hash code.
1993 _Scoped_node __node { this, std::forward<_Args>(__args)... };
1994 const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
1995
1996 __hash_code __code = this->_M_hash_code(__k);
1997 auto __pos
1998 = _M_insert_multi_node(__hint._M_cur, __code, __node._M_node);
1999 __node._M_node = nullptr;
2000 return __pos;
2001 }
2002
2003 template<typename _Key, typename _Value, typename _Alloc,
2004 typename _ExtractKey, typename _Equal,
2005 typename _Hash, typename _RangeHash, typename _Unused,
2006 typename _RehashPolicy, typename _Traits>
2007 auto
2008 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2009 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2010 _M_insert_unique_node(size_type __bkt, __hash_code __code,
2011 __node_ptr __node, size_type __n_elt)
2012 -> iterator
2013 {
2014 const __rehash_state& __saved_state = _M_rehash_policy._M_state();
2015 std::pair<bool, std::size_t> __do_rehash
2016 = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count,
2017 __n_elt);
2018
2019 if (__do_rehash.first)
2020 {
2021 _M_rehash(__do_rehash.second, __saved_state);
2022 __bkt = _M_bucket_index(__code);
2023 }
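// Note: the rehash (if any) happens before the node is linked, so a
// throwing rehash propagates while the new node is still owned by the
// caller's _Scoped_node and gets cleaned up there.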
2024
2025 this->_M_store_code(*__node, __code);
2026
2027 // Always insert at the beginning of the bucket.
2028 _M_insert_bucket_begin(__bkt, __node);
2029 ++_M_element_count;
2030 return iterator(__node);
2031 }
2032
2033 template<typename _Key, typename _Value, typename _Alloc,
2034 typename _ExtractKey, typename _Equal,
2035 typename _Hash, typename _RangeHash, typename _Unused,
2036 typename _RehashPolicy, typename _Traits>
2037 auto
2038 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2039 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2040 _M_insert_multi_node(__node_ptr __hint,
2041 __hash_code __code, __node_ptr __node)
2042 -> iterator
2043 {
2044 const __rehash_state& __saved_state = _M_rehash_policy._M_state();
2045 std::pair<bool, std::size_t> __do_rehash
2046 = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1);
2047
2048 if (__do_rehash.first)
2049 _M_rehash(__do_rehash.second, __saved_state);
2050
2051 this->_M_store_code(*__node, __code);
2052 const key_type& __k = _ExtractKey{}(__node->_M_v());
2053 size_type __bkt = _M_bucket_index(__code);
2054
2055 // Find the node before an equivalent one, or use the hint if it
2056 // exists and is equivalent.
2057 __node_base_ptr __prev
2058 = __builtin_expect(__hint != nullptr, false)
2059 && this->_M_equals(__k, __code, *__hint)
2060 ? __hint
2061 : _M_find_before_node(__bkt, __k, __code);
2062
2063 if (__prev)
2064 {
2065 // Insert after the node before the equivalent one.
2066 __node->_M_nxt = __prev->_M_nxt;
2067 __prev->_M_nxt = __node;
2068 if (__builtin_expect(__prev == __hint, false))
2069 // The hint might be the bucket's last node; in that case the
2070 // next bucket needs to be updated.
2071 if (__node->_M_nxt
2072 && !this->_M_equals(__k, __code, *__node->_M_next()))
2073 {
2074 size_type __next_bkt = _M_bucket_index(*__node->_M_next());
2075 if (__next_bkt != __bkt)
2076 _M_buckets[__next_bkt] = __node;
2077 }
2078 }
2079 else
2080 // The inserted node has no equivalent in the hashtable. We must
2081 // insert the new node at the beginning of the bucket to preserve
2082 // equivalent elements' relative positions.
2083 _M_insert_bucket_begin(__bkt, __node);
2084 ++_M_element_count;
2085 return iterator(__node);
2086 }
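// Illustration only: on the non-unique path equivalent elements always end
// up adjacent in iteration order, e.g.
//   std::unordered_multimap<int, char> m;
//   m.insert({1, 'a'});
//   m.insert({1, 'b'});  // stored adjacent to {1, 'a'}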
2087
2088 // Insert v if no element with its key is already present.
2089 template<typename _Key, typename _Value, typename _Alloc,
2090 typename _ExtractKey, typename _Equal,
2091 typename _Hash, typename _RangeHash, typename _Unused,
2092 typename _RehashPolicy, typename _Traits>
2093 template<typename _Arg, typename _NodeGenerator>
2094 auto
2095 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2096 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2097 _M_insert(_Arg&& __v, const _NodeGenerator& __node_gen,
2098 true_type /* __uks */)
2099 -> pair<iterator, bool>
2100 {
2101 const key_type& __k = _ExtractKey{}(__v);
2102 __hash_code __code = this->_M_hash_code(__k);
2103 size_type __bkt = _M_bucket_index(__code);
2104
2105 if (__node_ptr __node = _M_find_node(__bkt, __k, __code))
2106 return { iterator(__node), false };
2107
2108 _Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
2109 auto __pos
2110 = _M_insert_unique_node(__bkt, __code, __node._M_node);
2111 __node._M_node = nullptr;
2112 return { __pos, true };
2113 }
2114
2115 // Insert v unconditionally.
2116 template<typename _Key, typename _Value, typename _Alloc,
2117 typename _ExtractKey, typename _Equal,
2118 typename _Hash, typename _RangeHash, typename _Unused,
2119 typename _RehashPolicy, typename _Traits>
2120 template<typename _Arg, typename _NodeGenerator>
2121 auto
2122 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2123 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2124 _M_insert(const_iterator __hint, _Arg&& __v,
2125 const _NodeGenerator& __node_gen,
2126 false_type /* __uks */)
2127 -> iterator
2128 {
2129 // First, compute the hash code so that nothing has been done yet
2130 // if it throws.
2131 __hash_code __code = this->_M_hash_code(_ExtractKey{}(__v));
2132
2133 // Second, allocate the new node so that we don't rehash if the allocation throws.
2134 _Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
2135 auto __pos
2136 = _M_insert_multi_node(__hint._M_cur, __code, __node._M_node);
2137 __node._M_node = nullptr;
2138 return __pos;
2139 }
2140
2141 template<typename _Key, typename _Value, typename _Alloc,
2142 typename _ExtractKey, typename _Equal,
2143 typename _Hash, typename _RangeHash, typename _Unused,
2144 typename _RehashPolicy, typename _Traits>
2145 auto
2146 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2147 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2148 erase(const_iterator __it)
2149 -> iterator
2150 {
2151 __node_ptr __n = __it._M_cur;
2152 std::size_t __bkt = _M_bucket_index(*__n);
2153
2154 // Look for the previous node to unlink it from the erased one;
2155 // this is why buckets store their before-begin node: it makes
2156 // this search fast.
2157 __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
2158 return _M_erase(__bkt, __prev_n, __n);
2159 }
2160
2161 template<typename _Key, typename _Value, typename _Alloc,
2162 typename _ExtractKey, typename _Equal,
2163 typename _Hash, typename _RangeHash, typename _Unused,
2164 typename _RehashPolicy, typename _Traits>
2165 auto
2166 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2167 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2168 _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
2169 -> iterator
2170 {
2171 if (__prev_n == _M_buckets[__bkt])
2172 _M_remove_bucket_begin(__bkt, __n->_M_next(),
2173 __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
2174 else if (__n->_M_nxt)
2175 {
2176 size_type __next_bkt = _M_bucket_index(*__n->_M_next());
2177 if (__next_bkt != __bkt)
2178 _M_buckets[__next_bkt] = __prev_n;
2179 }
2180
2181 __prev_n->_M_nxt = __n->_M_nxt;
2182 iterator __result(__n->_M_next());
2183 this->_M_deallocate_node(__n);
2184 --_M_element_count;
2185
2186 return __result;
2187 }
2188
2189 template<typename _Key, typename _Value, typename _Alloc,
2190 typename _ExtractKey, typename _Equal,
2191 typename _Hash, typename _RangeHash, typename _Unused,
2192 typename _RehashPolicy, typename _Traits>
2193 auto
2194 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2195 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2196 _M_erase(true_type /* __uks */, const key_type& __k)
2197 -> size_type
2198 {
2199 __hash_code __code = this->_M_hash_code(__k);
2200 std::size_t __bkt = _M_bucket_index(__code);
2201
2202 // Look for the node before the first matching node.
2203 __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
2204 if (!__prev_n)
2205 return 0;
2206
2207 // We found a matching node, erase it.
2208 __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2209 _M_erase(__bkt, __prev_n, __n);
2210 return 1;
2211 }
2212
2213 template<typename _Key, typename _Value, typename _Alloc,
2214 typename _ExtractKey, typename _Equal,
2215 typename _Hash, typename _RangeHash, typename _Unused,
2216 typename _RehashPolicy, typename _Traits>
2217 auto
2218 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2219 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2220 _M_erase(false_type /* __uks */, const key_type& __k)
2221 -> size_type
2222 {
2223 __hash_code __code = this->_M_hash_code(__k);
2224 std::size_t __bkt = _M_bucket_index(__code);
2225
2226 // Look for the node before the first matching node.
2227 __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
2228 if (!__prev_n)
2229 return 0;
2230
2231 // _GLIBCXX_RESOLVE_LIB_DEFECTS
2232 // 526. Is it undefined if a function in the standard changes
2233 // in parameters?
2234 // We use one loop to find all matching nodes and another to deallocate
2235 // them so that the key stays valid during the first loop. It might be
2236 // invalidated indirectly when destroying nodes.
2237 __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2238 __node_ptr __n_last = __n->_M_next();
2239 while (__n_last && this->_M_node_equals(*__n, *__n_last))
2240 __n_last = __n_last->_M_next();
2241
2242 std::size_t __n_last_bkt = __n_last ? _M_bucket_index(*__n_last) : __bkt;
2243
2244 // Deallocate nodes.
2245 size_type __result = 0;
2246 do
2247 {
2248 __node_ptr __p = __n->_M_next();
2249 this->_M_deallocate_node(__n);
2250 __n = __p;
2251 ++__result;
2252 }
2253 while (__n != __n_last);
2254
2255 _M_element_count -= __result;
2256 if (__prev_n == _M_buckets[__bkt])
2257 _M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
2258 else if (__n_last_bkt != __bkt)
2259 _M_buckets[__n_last_bkt] = __prev_n;
2260 __prev_n->_M_nxt = __n_last;
2261 return __result;
2262 }
2263
2264 template<typename _Key, typename _Value, typename _Alloc,
2265 typename _ExtractKey, typename _Equal,
2266 typename _Hash, typename _RangeHash, typename _Unused,
2267 typename _RehashPolicy, typename _Traits>
2268 auto
2269 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2270 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2271 erase(const_iterator __first, const_iterator __last)
2272 -> iterator
2273 {
2274 __node_ptr __n = __first._M_cur;
2275 __node_ptr __last_n = __last._M_cur;
2276 if (__n == __last_n)
2277 return iterator(__n);
2278
2279 std::size_t __bkt = _M_bucket_index(*__n);
2280
2281 __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
2282 bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
2283 std::size_t __n_bkt = __bkt;
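// Erase bucket by bucket: the inner loop below deallocates the nodes of
// the current bucket, the bucket entry is then fixed up before moving on
// to the bucket of the next surviving node.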
2284 for (;;)
2285 {
2286 do
2287 {
2288 __node_ptr __tmp = __n;
2289 __n = __n->_M_next();
2290 this->_M_deallocate_node(__tmp);
2291 --_M_element_count;
2292 if (!__n)
2293 break;
2294 __n_bkt = _M_bucket_index(*__n);
2295 }
2296 while (__n != __last_n && __n_bkt == __bkt);
2297 if (__is_bucket_begin)
2298 _M_remove_bucket_begin(__bkt, __n, __n_bkt);
2299 if (__n == __last_n)
2300 break;
2301 __is_bucket_begin = true;
2302 __bkt = __n_bkt;
2303 }
2304
2305 if (__n && (__n_bkt != __bkt || __is_bucket_begin))
2306 _M_buckets[__n_bkt] = __prev_n;
2307 __prev_n->_M_nxt = __n;
2308 return iterator(__n);
2309 }
2310
2311 template<typename _Key, typename _Value, typename _Alloc,
2312 typename _ExtractKey, typename _Equal,
2313 typename _Hash, typename _RangeHash, typename _Unused,
2314 typename _RehashPolicy, typename _Traits>
2315 void
2316 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2317 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2318 clear() noexcept
2319 {
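// Deallocate every node, zero out the bucket array and reset the
// before-begin link; the bucket array itself is kept.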
2320 this->_M_deallocate_nodes(_M_begin());
2321 __builtin_memset(_M_buckets, 0,
2322 _M_bucket_count * sizeof(__node_base_ptr));
2323 _M_element_count = 0;
2324 _M_before_begin._M_nxt = nullptr;
2325 }
2326
2327 template<typename _Key, typename _Value, typename _Alloc,
2328 typename _ExtractKey, typename _Equal,
2329 typename _Hash, typename _RangeHash, typename _Unused,
2330 typename _RehashPolicy, typename _Traits>
2331 void
2332 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2333 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2334 rehash(size_type __bkt_count)
2335 {
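// Honour both the caller's request and the minimum implied by the current
// element count, then let the policy round up to its next bucket count.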
2336 const __rehash_state& __saved_state = _M_rehash_policy._M_state();
2337 __bkt_count
2338 = std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
2339 __bkt_count);
2340 __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);
2341
2342 if (__bkt_count != _M_bucket_count)
2343 _M_rehash(__bkt_count, __saved_state);
2344 else
2345 // No rehash; restore the previous state to keep the policy
2346 // consistent with the container state.
2347 _M_rehash_policy._M_reset(__saved_state);
2348 }
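// Illustration only (user-level view): rehash guarantees a minimum bucket
// count, which the policy may round up further, e.g.
//   std::unordered_set<int> s;
//   s.rehash(100);
//   assert(s.bucket_count() >= 100);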
2349
2350 template<typename _Key, typename _Value, typename _Alloc,
2351 typename _ExtractKey, typename _Equal,
2352 typename _Hash, typename _RangeHash, typename _Unused,
2353 typename _RehashPolicy, typename _Traits>
2354 void
2355 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2356 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2357 _M_rehash(size_type __bkt_count, const __rehash_state& __state)
2358 {
2359 __try
2360 {
2361 _M_rehash_aux(__bkt_count, __unique_keys{});
2362 }
2363 __catch(...)
2364 {
2365 // A failure here means that the bucket allocation failed. We
2366 // only have to restore the hash policy's previous state.
2367 _M_rehash_policy._M_reset(__state);
2368 __throw_exception_again;
2369 }
2370 }
2371
2372 // Rehash when there are no equivalent elements.
2373 template<typename _Key, typename _Value, typename _Alloc,
2374 typename _ExtractKey, typename _Equal,
2375 typename _Hash, typename _RangeHash, typename _Unused,
2376 typename _RehashPolicy, typename _Traits>
2377 void
2378 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2379 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2380 _M_rehash_aux(size_type __bkt_count, true_type /* __uks */)
2381 {
2382 __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
2383 __node_ptr __p = _M_begin();
2384 _M_before_begin._M_nxt = nullptr;
2385 std::size_t __bbegin_bkt = 0;
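// Relink every node into the new bucket array: a node opening a new bucket
// is pushed to the front of the global list, and the bucket that used to
// own _M_before_begin (__bbegin_bkt) is redirected to it.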
2386 while (__p)
2387 {
2388 __node_ptr __next = __p->_M_next();
2389 std::size_t __bkt
2390 = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
2391 if (!__new_buckets[__bkt])
2392 {
2393 __p->_M_nxt = _M_before_begin._M_nxt;
2394 _M_before_begin._M_nxt = __p;
2395 __new_buckets[__bkt] = &_M_before_begin;
2396 if (__p->_M_nxt)
2397 __new_buckets[__bbegin_bkt] = __p;
2398 __bbegin_bkt = __bkt;
2399 }
2400 else
2401 {
2402 __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
2403 __new_buckets[__bkt]->_M_nxt = __p;
2404 }
2405
2406 __p = __next;
2407 }
2408
2409 _M_deallocate_buckets();
2410 _M_bucket_count = __bkt_count;
2411 _M_buckets = __new_buckets;
2412 }
2413
2414 // Rehash when there can be equivalent elements, preserving their
2415 // relative order.
2416 template<typename _Key, typename _Value, typename _Alloc,
2417 typename _ExtractKey, typename _Equal,
2418 typename _Hash, typename _RangeHash, typename _Unused,
2419 typename _RehashPolicy, typename _Traits>
2420 void
2421 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2422 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2423 _M_rehash_aux(size_type __bkt_count, false_type /* __uks */)
2424 {
2425 __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
2426 __node_ptr __p = _M_begin();
2427 _M_before_begin._M_nxt = nullptr;
2428 std::size_t __bbegin_bkt = 0;
2429 std::size_t __prev_bkt = 0;
2430 __node_ptr __prev_p = nullptr;
2431 bool __check_bucket = false;
2432
2433 while (__p)
2434 {
2435 __node_ptr __next = __p->_M_next();
2436 std::size_t __bkt
2437 = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
2438
2439 if (__prev_p && __prev_bkt == __bkt)
2440 {
2441 // The previous node was already inserted in this bucket; insert
2442 // after it to preserve the relative order of equivalent
2443 // elements.
2444 __p->_M_nxt = __prev_p->_M_nxt;
2445 __prev_p->_M_nxt = __p;
2446
2447 // Inserting after a node in a bucket requires checking that we
2448 // haven't changed the bucket's last node; if we have, the next
2449 // bucket, whose before-begin node that is, must be updated. We
2450 // schedule the check as soon as we leave the sequence of
2451 // equivalent nodes, to limit the number of checks.
2452 __check_bucket = true;
2453 }
2454 else
2455 {
2456 if (__check_bucket)
2457 {
2458 // Check whether the next bucket must be updated because of the
2459 // insertions into the __prev_bkt bucket.
2460 if (__prev_p->_M_nxt)
2461 {
2462 std::size_t __next_bkt
2463 = __hash_code_base::_M_bucket_index(
2464 *__prev_p->_M_next(), __bkt_count);
2465 if (__next_bkt != __prev_bkt)
2466 __new_buckets[__next_bkt] = __prev_p;
2467 }
2468 __check_bucket = false;
2469 }
2470
2471 if (!__new_buckets[__bkt])
2472 {
2473 __p->_M_nxt = _M_before_begin._M_nxt;
2474 _M_before_begin._M_nxt = __p;
2475 __new_buckets[__bkt] = &_M_before_begin;
2476 if (__p->_M_nxt)
2477 __new_buckets[__bbegin_bkt] = __p;
2478 __bbegin_bkt = __bkt;
2479 }
2480 else
2481 {
2482 __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
2483 __new_buckets[__bkt]->_M_nxt = __p;
2484 }
2485 }
2486 __prev_p = __p;
2487 __prev_bkt = __bkt;
2488 __p = __next;
2489 }
2490
2491 if (__check_bucket && __prev_p->_M_nxt)
2492 {
2493 std::size_t __next_bkt
2494 = __hash_code_base::_M_bucket_index(*__prev_p->_M_next(),
2495 __bkt_count);
2496 if (__next_bkt != __prev_bkt)
2497 __new_buckets[__next_bkt] = __prev_p;
2498 }
2499
2500 _M_deallocate_buckets();
2501 _M_bucket_count = __bkt_count;
2502 _M_buckets = __new_buckets;
2503 }
2504
2505#if __cplusplus > 201402L
2506 template<typename, typename, typename> class _Hash_merge_helper { };
2507#endif // C++17
2508
2509#if __cpp_deduction_guides >= 201606
2510 // Used to constrain deduction guides
2511 template<typename _Hash>
2512 using _RequireNotAllocatorOrIntegral
2513 = __enable_if_t<!__or_<is_integral<_Hash>, __is_allocator<_Hash>>::value>;
2514#endif
2515
2516/// @endcond
2517_GLIBCXX_END_NAMESPACE_VERSION
2518} // namespace std
2519
2520#endif // _HASHTABLE_H
2521
