libstdc++
bits/hashtable.h
Go to the documentation of this file.
1// hashtable.h header -*- C++ -*-
2
3// Copyright (C) 2007-2026 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/hashtable.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{unordered_map, unordered_set}
28 */
29
30#ifndef _HASHTABLE_H
31#define _HASHTABLE_H 1
32
33#ifdef _GLIBCXX_SYSHDR
34#pragma GCC system_header
35#endif
36
#include <bits/hashtable_policy.h>
#include <bits/enable_special_members.h>
#include <bits/stl_algobase.h> // fill_n, is_permutation
#include <bits/stl_function.h> // __has_is_transparent_t
#ifdef __glibcxx_node_extract // >= C++17 && HOSTED
# include <bits/node_handle.h>
#endif
44
45#pragma GCC diagnostic push
46#pragma GCC diagnostic ignored "-Wc++11-extensions"
47
48namespace std _GLIBCXX_VISIBILITY(default)
49{
50_GLIBCXX_BEGIN_NAMESPACE_VERSION
51/// @cond undocumented
52
53 template<typename _Tp, typename _Hash>
54 using __cache_default
55 = __not_<__and_<// Do not cache for fast hasher.
57 // Mandatory for the rehash process.
58 __is_nothrow_invocable<const _Hash&, const _Tp&>>>;
59
60 // Helper to conditionally delete the default constructor.
61 // The _Hash_node_base type is used to distinguish this specialization
62 // from any other potentially-overlapping subobjects of the hashtable.
63 template<typename _Equal, typename _Hash, typename _Allocator>
64 using _Hashtable_enable_default_ctor
65 = _Enable_default_constructor<__and_<is_default_constructible<_Equal>,
68 __detail::_Hash_node_base>;
69
70 /**
71 * Primary class template _Hashtable.
72 *
73 * @ingroup hashtable-detail
74 *
75 * @tparam _Value CopyConstructible type.
76 *
77 * @tparam _Key CopyConstructible type.
78 *
79 * @tparam _Alloc An allocator type
80 * ([lib.allocator.requirements]) whose _Alloc::value_type is
81 * _Value. As a conforming extension, we allow for
82 * _Alloc::value_type != _Value.
83 *
84 * @tparam _ExtractKey Function object that takes an object of type
85 * _Value and returns a value of type _Key.
86 *
87 * @tparam _Equal Function object that takes two objects of type k
88 * and returns a bool-like value that is true if the two objects
89 * are considered equal.
90 *
91 * @tparam _Hash The hash function. A unary function object with
92 * argument type _Key and result type size_t. Return values should
   *  be distributed over the entire range [0, numeric_limits<size_t>::max()].
94 *
95 * @tparam _RangeHash The range-hashing function (in the terminology of
96 * Tavori and Dreizin). A binary function object whose argument
97 * types and result type are all size_t. Given arguments r and N,
98 * the return value is in the range [0, N).
99 *
100 * @tparam _Unused Not used.
101 *
102 * @tparam _RehashPolicy Policy class with three members, all of
103 * which govern the bucket count. _M_next_bkt(n) returns a bucket
104 * count no smaller than n. _M_bkt_for_elements(n) returns a
105 * bucket count appropriate for an element count of n.
106 * _M_need_rehash(n_bkt, n_elt, n_ins) determines whether, if the
107 * current bucket count is n_bkt and the current element count is
108 * n_elt, we need to increase the bucket count for n_ins insertions.
109 * If so, returns make_pair(true, n), where n is the new bucket count. If
110 * not, returns make_pair(false, <anything>)
111 *
112 * @tparam _Traits Compile-time class with three boolean
113 * std::integral_constant members: __cache_hash_code, __constant_iterators,
114 * __unique_keys.
115 *
116 * Each _Hashtable data structure has:
117 *
118 * - _Bucket[] _M_buckets
119 * - _Hash_node_base _M_before_begin
120 * - size_type _M_bucket_count
121 * - size_type _M_element_count
122 *
123 * with _Bucket being _Hash_node_base* and _Hash_node containing:
124 *
125 * - _Hash_node* _M_next
126 * - Tp _M_value
127 * - size_t _M_hash_code if cache_hash_code is true
128 *
129 * In terms of Standard containers the hashtable is like the aggregation of:
130 *
131 * - std::forward_list<_Node> containing the elements
132 * - std::vector<std::forward_list<_Node>::iterator> representing the buckets
133 *
134 * The non-empty buckets contain the node before the first node in the
135 * bucket. This design makes it possible to implement something like a
136 * std::forward_list::insert_after on container insertion and
137 * std::forward_list::erase_after on container erase
138 * calls. _M_before_begin is equivalent to
139 * std::forward_list::before_begin. Empty buckets contain
140 * nullptr. Note that one of the non-empty buckets contains
141 * &_M_before_begin which is not a dereferenceable node so the
142 * node pointer in a bucket shall never be dereferenced, only its
143 * next node can be.
144 *
145 * Walking through a bucket's nodes requires a check on the hash code to
146 * see if each node is still in the bucket. Such a design assumes a
147 * quite efficient hash functor and is one of the reasons it is
148 * highly advisable to set __cache_hash_code to true.
149 *
150 * The container iterators are simply built from nodes. This way
151 * incrementing the iterator is perfectly efficient independent of
152 * how many empty buckets there are in the container.
153 *
154 * On insert we compute the element's hash code and use it to find the
155 * bucket index. If the element must be inserted in an empty bucket
156 * we add it at the beginning of the singly linked list and make the
157 * bucket point to _M_before_begin. The bucket that used to point to
158 * _M_before_begin, if any, is updated to point to its new before
159 * begin node.
160 *
161 * Note that all equivalent values, if any, are next to each other, if
162 * we find a non-equivalent value after an equivalent one it means that
163 * we won't find any new equivalent value.
164 *
165 * On erase, the simple iterator design requires using the hash
166 * functor to get the index of the bucket to update. For this
167 * reason, when __cache_hash_code is set to false the hash functor must
168 * not throw and this is enforced by a static assertion.
169 *
170 * Functionality is implemented by decomposition into base classes,
171 * where the derived _Hashtable class is used in _Map_base and
172 * _Rehash_base base classes to access the
173 * "this" pointer. _Hashtable_base is used in the base classes as a
174 * non-recursive, fully-completed-type so that detailed nested type
175 * information, such as iterator type and node type, can be
176 * used. This is similar to the "Curiously Recurring Template
177 * Pattern" (CRTP) technique, but uses a reconstructed, not
178 * explicitly passed, template pattern.
179 *
180 * Base class templates are:
181 * - __detail::_Hashtable_base
182 * - __detail::_Map_base
183 * - __detail::_Rehash_base
184 */
185 template<typename _Key, typename _Value, typename _Alloc,
186 typename _ExtractKey, typename _Equal,
187 typename _Hash, typename _RangeHash, typename _Unused,
188 typename _RehashPolicy, typename _Traits>
189 class _Hashtable
190 : public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
191 _Hash, _RangeHash, _Unused, _Traits>,
192 public __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
193 _Hash, _RangeHash, _Unused,
194 _RehashPolicy, _Traits>,
195 public __detail::_Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
196 _Hash, _RangeHash, _Unused,
197 _RehashPolicy, _Traits>,
198 private __detail::_Hashtable_alloc<
199 __alloc_rebind<_Alloc,
200 __detail::_Hash_node<_Value,
201 _Traits::__hash_cached::value>>>,
202 private _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>
203 {
204 static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
205 "unordered container must have a non-const, non-volatile value_type");
206#if __cplusplus > 201703L || defined __STRICT_ANSI__
207 static_assert(is_same<typename _Alloc::value_type, _Value>{},
208 "unordered container must have the same value_type as its allocator");
209#endif
210 static_assert(is_copy_constructible<_Hash>::value,
211 "hash function must be copy constructible");
212
213 using __traits_type = _Traits;
214 using __hash_cached = typename __traits_type::__hash_cached;
215 using __constant_iterators = typename __traits_type::__constant_iterators;
216 using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
217 using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
218
219 using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
220
221 using __node_value_type =
222 __detail::_Hash_node_value<_Value, __hash_cached::value>;
223 using __node_ptr = typename __hashtable_alloc::__node_ptr;
224 using __value_alloc_traits =
225 typename __hashtable_alloc::__value_alloc_traits;
226 using __node_alloc_traits =
227 typename __hashtable_alloc::__node_alloc_traits;
228 using __node_base = typename __hashtable_alloc::__node_base;
229 using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
230 using __buckets_ptr = typename __hashtable_alloc::__buckets_ptr;
231
232 using __enable_default_ctor
233 = _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>;
234 using __rehash_guard_t
235 = __detail::_RehashStateGuard<_RehashPolicy>;
236
237 public:
238 typedef _Key key_type;
239 typedef _Value value_type;
240 typedef _Alloc allocator_type;
241 typedef _Equal key_equal;
242
243 // mapped_type, if present, comes from _Map_base.
244 // hasher, if present, comes from _Hash_code_base/_Hashtable_base.
245 typedef typename __value_alloc_traits::pointer pointer;
246 typedef typename __value_alloc_traits::const_pointer const_pointer;
247 typedef value_type& reference;
248 typedef const value_type& const_reference;
249
250 using iterator
251 = __detail::_Node_iterator<_Value, __constant_iterators::value,
252 __hash_cached::value>;
253
254 using const_iterator
255 = __detail::_Node_const_iterator<_Value, __constant_iterators::value,
256 __hash_cached::value>;
257
258 using local_iterator = __detail::_Local_iterator<key_type, _Value,
259 _ExtractKey, _Hash, _RangeHash, _Unused,
260 __constant_iterators::value,
261 __hash_cached::value>;
262
263 using const_local_iterator = __detail::_Local_const_iterator<
264 key_type, _Value,
265 _ExtractKey, _Hash, _RangeHash, _Unused,
266 __constant_iterators::value, __hash_cached::value>;
267
268 private:
269 using __rehash_type = _RehashPolicy;
270
271 using __unique_keys = typename __traits_type::__unique_keys;
272
273 using __hashtable_base = __detail::
274 _Hashtable_base<_Key, _Value, _ExtractKey,
275 _Equal, _Hash, _RangeHash, _Unused, _Traits>;
276
277 using __hash_code_base = typename __hashtable_base::__hash_code_base;
278 using __hash_code = typename __hashtable_base::__hash_code;
279 using __ireturn_type = __conditional_t<__unique_keys::value,
281 iterator>;
282
283 using __map_base = __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey,
284 _Equal, _Hash, _RangeHash, _Unused,
285 _RehashPolicy, _Traits>;
286
287 using __rehash_base = __detail::_Rehash_base<_Key, _Value, _Alloc,
288 _ExtractKey, _Equal,
289 _Hash, _RangeHash, _Unused,
290 _RehashPolicy, _Traits>;
291
292 using __node_builder_t = __detail::_NodeBuilder<_ExtractKey>;
293
      // Simple RAII type for managing a node containing an element
      struct _Scoped_node
      {
	// Take ownership of a node with a constructed element.
	_Scoped_node(__node_ptr __n, __hashtable_alloc* __h)
	: _M_h(__h), _M_node(__n) { }

	// Allocate a node and construct an element within it.
	template<typename... _Args>
	  _Scoped_node(__hashtable_alloc* __h, _Args&&... __args)
	  : _M_h(__h),
	    _M_node(__h->_M_allocate_node(std::forward<_Args>(__args)...))
	  { }

	// Destroy element and deallocate node.
	~_Scoped_node() { if (_M_node) _M_h->_M_deallocate_node(_M_node); };

	// Not copyable: ownership of the node is unique.
	_Scoped_node(const _Scoped_node&) = delete;
	_Scoped_node& operator=(const _Scoped_node&) = delete;

	__hashtable_alloc* _M_h;  // Allocator used to release the node.
	__node_ptr _M_node;       // Owned node; nothing freed when null.
      };
317
      // Compile-time diagnostics.

      // _Hash_code_base has everything protected, so use this derived type to
      // access it.
      struct __hash_code_base_access : __hash_code_base
      { using __hash_code_base::_M_bucket_index; };

      // To get bucket index we need _RangeHash to be non-throwing.
      static_assert(is_nothrow_default_constructible<_RangeHash>::value,
		    "Functor used to map hash code to bucket index"
		    " must be nothrow default constructible");
      // Invoking it with two size_t arguments must also be noexcept.
      static_assert(noexcept(
	std::declval<const _RangeHash&>()((std::size_t)0, (std::size_t)0)),
	"Functor used to map hash code to bucket index must be"
	" noexcept");
333
334 // To compute bucket index we also need _ExtractKey to be non-throwing.
335 static_assert(is_nothrow_default_constructible<_ExtractKey>::value,
336 "_ExtractKey must be nothrow default constructible");
337 static_assert(noexcept(
339 "_ExtractKey functor must be noexcept invocable");
340
341 template<typename _Keya, typename _Valuea, typename _Alloca,
342 typename _ExtractKeya, typename _Equala,
343 typename _Hasha, typename _RangeHasha, typename _Unuseda,
344 typename _RehashPolicya, typename _Traitsa,
345 bool _Unique_keysa>
346 friend struct __detail::_Map_base;
347
348 public:
349 using size_type = typename __hashtable_base::size_type;
350 using difference_type = typename __hashtable_base::difference_type;
351
352#ifdef __glibcxx_node_extract // >= C++17 && HOSTED
353 using node_type = _Node_handle<_Key, _Value, __node_alloc_type>;
354 using insert_return_type = _Node_insert_return<iterator, node_type>;
355#endif
356
357 private:
358 __buckets_ptr _M_buckets = &_M_single_bucket;
359 size_type _M_bucket_count = 1;
360 __node_base _M_before_begin;
361 size_type _M_element_count = 0;
362 _RehashPolicy _M_rehash_policy;
363
364 // A single bucket used when only need for 1 bucket. Especially
365 // interesting in move semantic to leave hashtable with only 1 bucket
366 // which is not allocated so that we can have those operations noexcept
367 // qualified.
368 // Note that we can't leave hashtable with 0 bucket without adding
369 // numerous checks in the code to avoid 0 modulus.
370 __node_base_ptr _M_single_bucket = nullptr;
371
372 void
373 _M_update_bbegin()
374 {
375 if (auto __begin = _M_begin())
376 _M_buckets[_M_bucket_index(*__begin)] = &_M_before_begin;
377 }
378
      // Make __n the first node and update its bucket to point to
      // _M_before_begin.
      void
      _M_update_bbegin(__node_ptr __n)
      {
	_M_before_begin._M_nxt = __n;
	_M_update_bbegin();
      }

      // True if __bkts is the embedded single bucket, i.e. no bucket
      // array was allocated.
      bool
      _M_uses_single_bucket(__buckets_ptr __bkts) const
      { return __builtin_expect(__bkts == &_M_single_bucket, false); }

      bool
      _M_uses_single_bucket() const
      { return _M_uses_single_bucket(_M_buckets); }

      // Container sizes up to this threshold use a linear search instead
      // of hashing (see the comment above _M_locate).
      static constexpr size_t
      __small_size_threshold() noexcept
      {
	return
	  __detail::_Hashtable_hash_traits<_Hash>::__small_size_threshold();
      }
400
      // Access this hashtable's allocator base class.
      __hashtable_alloc&
      _M_base_alloc() { return *this; }

      // Allocate an array of __bkt_count buckets; the embedded
      // _M_single_bucket member is used (no allocation) when only one
      // bucket is needed.
      __buckets_ptr
      _M_allocate_buckets(size_type __bkt_count)
      {
	if (__builtin_expect(__bkt_count == 1, false))
	  {
	    _M_single_bucket = nullptr;
	    return &_M_single_bucket;
	  }

	return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
      }

      // Deallocate __bkts; a no-op when it is the embedded single bucket.
      void
      _M_deallocate_buckets(__buckets_ptr __bkts, size_type __bkt_count)
      {
	if (_M_uses_single_bucket(__bkts))
	  return;

	__hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
      }

      void
      _M_deallocate_buckets()
      { _M_deallocate_buckets(_M_buckets, _M_bucket_count); }
428
      // Gets bucket begin, deals with the fact that non-empty buckets contain
      // their before begin node.
      __node_ptr
      _M_bucket_begin(size_type __bkt) const
      {
	__node_base_ptr __n = _M_buckets[__bkt];
	return __n ? static_cast<__node_ptr>(__n->_M_nxt) : nullptr;
      }

      // First node of the container, or null when empty.
      __node_ptr
      _M_begin() const
      { return static_cast<__node_ptr>(_M_before_begin._M_nxt); }
441
      // Assign *this using another _Hashtable instance. Whether elements
      // are copied or moved depends on the _Ht reference.
      template<typename _Ht>
	void
	_M_assign_elements(_Ht&&);

      // Assign using a node generator that allocates fresh nodes.
      template<typename _Ht>
	void
	_M_assign(_Ht&& __ht)
	{
	  __detail::_AllocNode<__node_alloc_type> __alloc_node_gen(*this);
	  _M_assign(std::forward<_Ht>(__ht), __alloc_node_gen);
	}

      template<typename _Ht, typename _NodeGenerator>
	void
	_M_assign(_Ht&&, _NodeGenerator&);

      // Move assignment helpers: true_type when storage can be taken
      // wholesale, false_type when elements must be handled one by one
      // (see operator=(_Hashtable&&)).
      void
      _M_move_assign(_Hashtable&&, true_type);

      void
      _M_move_assign(_Hashtable&&, false_type);

      // Reset the data members; defined out of line.
      void
      _M_reset() noexcept;

      // Delegation target initializing functors and allocator only.
      _Hashtable(const _Hash& __h, const _Equal& __eq,
		 const allocator_type& __a)
      : __hashtable_base(__h, __eq),
	__hashtable_alloc(__node_alloc_type(__a)),
	__enable_default_ctor(_Enable_default_constructor_tag{})
      { }
475
      // Whether move construction can be noexcept: requires that the
      // bucket array need not be reallocated (_No_realloc) and that both
      // _Hash and _Equal are nothrow copy constructible.
      template<bool _No_realloc = true>
	static constexpr bool
	_S_nothrow_move()
	{
#if __cpp_constexpr >= 201304 // >= C++14
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
	  if constexpr (_No_realloc)
	    if constexpr (is_nothrow_copy_constructible<_Hash>::value)
	      return is_nothrow_copy_constructible<_Equal>::value;
	  return false;
# pragma GCC diagnostic pop
#else // In C++11 a constexpr function must be a single statement.
	  return __and_<__bool_constant<_No_realloc>,
			is_nothrow_copy_constructible<_Hash>,
			is_nothrow_copy_constructible<_Equal>>::value;
#endif
	}

      // Move-constructor delegation targets, distinguishing always-equal
      // allocators (noexcept) from possibly-unequal ones.
      _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
		 true_type /* alloc always equal */)
      noexcept(_S_nothrow_move());

      _Hashtable(_Hashtable&&, __node_alloc_type&&,
		 false_type /* alloc always equal */);

      // Range-constructor delegation targets for unique (true_type) and
      // equivalent (false_type) keys.
      template<typename _InputIterator>
	_Hashtable(_InputIterator __first, _InputIterator __last,
		   size_type __bkt_count_hint,
		   const _Hash&, const _Equal&, const allocator_type&,
		   true_type __uks);

      template<typename _InputIterator>
	_Hashtable(_InputIterator __first, _InputIterator __last,
		   size_type __bkt_count_hint,
		   const _Hash&, const _Equal&, const allocator_type&,
		   false_type __uks);
513
    public:
      // Constructor, destructor, assignment, swap
      _Hashtable() = default;

      _Hashtable(const _Hashtable&);

      _Hashtable(const _Hashtable&, const allocator_type&);

      // Construct an empty container with at least __bkt_count_hint buckets.
      explicit
      _Hashtable(size_type __bkt_count_hint,
		 const _Hash& __hf = _Hash(),
		 const key_equal& __eql = key_equal(),
		 const allocator_type& __a = allocator_type());

      // Use delegating constructors.
      _Hashtable(_Hashtable&& __ht)
      noexcept(_S_nothrow_move())
      : _Hashtable(std::move(__ht), std::move(__ht._M_node_allocator()),
		   true_type{})
      { }

      _Hashtable(_Hashtable&& __ht, const allocator_type& __a)
      noexcept(_S_nothrow_move<__node_alloc_traits::_S_always_equal()>())
      : _Hashtable(std::move(__ht), __node_alloc_type(__a),
		   typename __node_alloc_traits::is_always_equal{})
      { }

      explicit
      _Hashtable(const allocator_type& __a)
      : __hashtable_alloc(__node_alloc_type(__a)),
	__enable_default_ctor(_Enable_default_constructor_tag{})
      { }

      // Range constructor; dispatches on __unique_keys.
      template<typename _InputIterator>
	_Hashtable(_InputIterator __f, _InputIterator __l,
		   size_type __bkt_count_hint = 0,
		   const _Hash& __hf = _Hash(),
		   const key_equal& __eql = key_equal(),
		   const allocator_type& __a = allocator_type())
	: _Hashtable(__f, __l, __bkt_count_hint, __hf, __eql, __a,
		     __unique_keys{})
	{ }

      _Hashtable(initializer_list<value_type> __l,
		 size_type __bkt_count_hint = 0,
		 const _Hash& __hf = _Hash(),
		 const key_equal& __eql = key_equal(),
		 const allocator_type& __a = allocator_type())
      : _Hashtable(__l.begin(), __l.end(), __bkt_count_hint,
		   __hf, __eql, __a, __unique_keys{})
      { }
565
      _Hashtable&
      operator=(const _Hashtable& __ht);

      // Move assignment; storage is taken wholesale when the node
      // allocator propagates on move assignment or always compares equal,
      // otherwise elements are moved individually.
      _Hashtable&
      operator=(_Hashtable&& __ht)
      noexcept(__node_alloc_traits::_S_nothrow_move()
	       && is_nothrow_move_assignable<_Hash>::value
	       && is_nothrow_move_assignable<_Equal>::value)
      {
	constexpr bool __move_storage =
	  __node_alloc_traits::_S_propagate_on_move_assign()
	  || __node_alloc_traits::_S_always_equal();
	_M_move_assign(std::move(__ht), __bool_constant<__move_storage>());
	return *this;
      }
581
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
      // Replace the contents with the elements of __l, recycling existing
      // nodes as storage for the new elements where possible.
      _Hashtable&
      operator=(initializer_list<value_type> __l)
      {
	using __reuse_or_alloc_node_gen_t =
	  __detail::_ReuseOrAllocNode<__node_alloc_type>;

	// __roan takes ownership of the current node list so nodes can be
	// reused; detach the list before clear() releases the buckets.
	__reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
	_M_before_begin._M_nxt = nullptr;
	clear();

	// We assume that all elements of __l are likely to be inserted.
	auto __l_bkt_count = _M_rehash_policy._M_bkt_for_elements(__l.size());

	// Excess buckets might have been intentionally reserved by the user,
	// so rehash if we need to grow, but don't shrink.
	if (_M_bucket_count < __l_bkt_count)
	  rehash(__l_bkt_count);

	__hash_code __code;
	size_type __bkt;
	for (auto& __e : __l)
	  {
	    const key_type& __k = _ExtractKey{}(__e);

	    if constexpr (__unique_keys::value)
	      {
		if (auto __loc = _M_locate(__k))
		  continue; // Found existing element with equivalent key
		else
		  {
		    // Not found: reuse the hash code and bucket index that
		    // _M_locate already computed for the insertion below.
		    __code = __loc._M_hash_code;
		    __bkt = __loc._M_bucket_index;
		  }
	      }
	    else
	      {
		__code = this->_M_hash_code(__k);
		__bkt = _M_bucket_index(__code);
	      }

	    _M_insert_unique_node(__bkt, __code, __roan(__e));
	  }

	return *this;
      }
#pragma GCC diagnostic pop

      ~_Hashtable() noexcept;

      // Swap contents with the argument; requires nothrow-swappable
      // functors for the noexcept guarantee.
      void
      swap(_Hashtable&)
      noexcept(__and_<__is_nothrow_swappable<_Hash>,
		      __is_nothrow_swappable<_Equal>>::value);
637
      // Basic container operations
      iterator
      begin() noexcept
      { return iterator(_M_begin()); }

      const_iterator
      begin() const noexcept
      { return const_iterator(_M_begin()); }

      // end() is the null node pointer for all unordered containers.
      iterator
      end() noexcept
      { return iterator(nullptr); }

      const_iterator
      end() const noexcept
      { return const_iterator(nullptr); }

      const_iterator
      cbegin() const noexcept
      { return const_iterator(_M_begin()); }

      const_iterator
      cend() const noexcept
      { return const_iterator(nullptr); }

      size_type
      size() const noexcept
      { return _M_element_count; }

      _GLIBCXX_NODISCARD bool
      empty() const noexcept
      { return size() == 0; }

      allocator_type
      get_allocator() const noexcept
      { return allocator_type(this->_M_node_allocator()); }

      size_type
      max_size() const noexcept
      { return __node_alloc_traits::max_size(this->_M_node_allocator()); }

      // Observers
      key_equal
      key_eq() const
      { return this->_M_eq(); }

      // hash_function, if present, comes from _Hash_code_base.

      // Bucket operations
      size_type
      bucket_count() const noexcept
      { return _M_bucket_count; }

      size_type
      max_bucket_count() const noexcept
      { return max_size(); }

      // Number of elements in bucket __bkt; linear in the bucket size.
      size_type
      bucket_size(size_type __bkt) const
      { return std::distance(begin(__bkt), end(__bkt)); }
698
      // Index of the bucket that would hold key __k.
      size_type
      bucket(const key_type& __k) const
      { return _M_bucket_index(this->_M_hash_code(__k)); }

      local_iterator
      begin(size_type __bkt)
      {
	return local_iterator(*this, _M_bucket_begin(__bkt),
			      __bkt, _M_bucket_count);
      }

      local_iterator
      end(size_type __bkt)
      { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }

      const_local_iterator
      begin(size_type __bkt) const
      {
	return const_local_iterator(*this, _M_bucket_begin(__bkt),
				    __bkt, _M_bucket_count);
      }

      const_local_iterator
      end(size_type __bkt) const
      { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }

      // DR 691.
      const_local_iterator
      cbegin(size_type __bkt) const
      {
	return const_local_iterator(*this, _M_bucket_begin(__bkt),
				    __bkt, _M_bucket_count);
      }

      const_local_iterator
      cend(size_type __bkt) const
      { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }

      // Average number of elements per bucket.
      float
      load_factor() const noexcept
      {
	return static_cast<float>(size()) / static_cast<float>(bucket_count());
      }

      // max_load_factor, if present, comes from _Rehash_base.

      // Generalization of max_load_factor. Extension, not found in
      // TR1. Only useful if _RehashPolicy is something other than
      // the default.
      const _RehashPolicy&
      __rehash_policy() const
      { return _M_rehash_policy; }

      void
      __rehash_policy(const _RehashPolicy& __pol)
      { _M_rehash_policy = __pol; }
755
      // Lookup.
      // Return an iterator to the element with key equivalent to __k,
      // or end() if not present; defined out of line.
      iterator
      find(const key_type& __k);

      const_iterator
      find(const key_type& __k) const;

      // Number of elements with key equivalent to __k.
      size_type
      count(const key_type& __k) const;
765
767 equal_range(const key_type& __k);
768
770 equal_range(const key_type& __k) const;
771
#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED
      // Heterogeneous lookup: participates only when both _Hash and
      // _Equal declare an is_transparent member type.
      template<typename _Kt,
	       typename = __has_is_transparent_t<_Hash, _Kt>,
	       typename = __has_is_transparent_t<_Equal, _Kt>>
	iterator
	_M_find_tr(const _Kt& __k);

      template<typename _Kt,
	       typename = __has_is_transparent_t<_Hash, _Kt>,
	       typename = __has_is_transparent_t<_Equal, _Kt>>
	const_iterator
	_M_find_tr(const _Kt& __k) const;

      template<typename _Kt,
	       typename = __has_is_transparent_t<_Hash, _Kt>,
	       typename = __has_is_transparent_t<_Equal, _Kt>>
	size_type
	_M_count_tr(const _Kt& __k) const;

      template<typename _Kt,
	       typename = __has_is_transparent_t<_Hash, _Kt>,
	       typename = __has_is_transparent_t<_Equal, _Kt>>
	pair<iterator, iterator>
	_M_equal_range_tr(const _Kt& __k);

      template<typename _Kt,
	       typename = __has_is_transparent_t<_Hash, _Kt>,
	       typename = __has_is_transparent_t<_Equal, _Kt>>
	pair<const_iterator, const_iterator>
	_M_equal_range_tr(const _Kt& __k) const;
#endif // __glibcxx_generic_unordered_lookup

      // Rehash as needed before inserting __n elements; defined out of
      // line (NOTE(review): exact growth policy is in the definition).
      void _M_rehash_insert(size_type __n);
805
    private:
      // Bucket index computation helpers.
      size_type
      _M_bucket_index(const __node_value_type& __n) const noexcept
      { return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }

      size_type
      _M_bucket_index(__hash_code __c) const
      { return __hash_code_base::_M_bucket_index(__c, _M_bucket_count); }

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
      // Get hash code for a node that comes from another _Hashtable.
      // Reuse a cached hash code if the hash function is stateless,
      // otherwise recalculate it using our own hash function.
      __hash_code
      _M_hash_code_ext(const __node_value_type& __from) const
      {
	if constexpr (__and_<__hash_cached, is_empty<_Hash>>::value)
	  return __from._M_hash_code;
	else
	  return this->_M_hash_code(_ExtractKey{}(__from._M_v()));
      }

      // Like _M_bucket_index but when the node is coming from another
      // container instance.
      size_type
      _M_bucket_index_ext(const __node_value_type& __from) const
      { return _RangeHash{}(_M_hash_code_ext(__from), _M_bucket_count); }

      // Copy the cached hash code from __from into __to; a no-op when
      // hash codes are not cached.
      void
      _M_copy_code(__node_value_type& __to,
		   const __node_value_type& __from) const
      {
	if constexpr (__hash_cached::value)
	  __to._M_hash_code = _M_hash_code_ext(__from);
      }

      // Store __code in __to's cache; a no-op when not caching.
      void
      _M_store_code(__node_value_type& __to, __hash_code __code) const
      {
	if constexpr (__hash_cached::value)
	  __to._M_hash_code = __code;
      }
#pragma GCC diagnostic pop
851
      // Find and insert helper functions and types

      // Find the node before the one matching the criteria.
      __node_base_ptr
      _M_find_before_node(
	size_type __bkt, const key_type& __k, __hash_code __code) const
      { return _M_find_before_node_tr<key_type>(__bkt, __k, __code); }

      // Heterogeneous-key version; defined out of line.
      template<typename _Kt>
	__node_base_ptr
	_M_find_before_node_tr(size_type, const _Kt&, __hash_code) const;
863
      // A pointer to a particular node and/or a hash code and bucket index
      // where such a node would be found in the container.
      struct __location_type
      {
	// True if _M_node() is a valid node pointer.
	explicit operator bool() const noexcept
	{ return static_cast<bool>(_M_before); }

	// An iterator that refers to the node, or end().
	explicit operator iterator() const noexcept
	{ return iterator(_M_node()); }

	// A const_iterator that refers to the node, or cend().
	explicit operator const_iterator() const noexcept
	{ return const_iterator(_M_node()); }

	// A pointer to the node, or null.
	__node_ptr _M_node() const
	{
	  if (_M_before)
	    return static_cast<__node_ptr>(_M_before->_M_nxt);
	  return __node_ptr();
	}

	__node_base_ptr _M_before{}; // Must only be used to get _M_nxt
	__hash_code _M_hash_code{};  // Only valid if _M_bucket_index != -1
	// size_type(-1) means hash code/bucket were not computed.
	size_type _M_bucket_index = size_type(-1);
      };
892
      // Adaptive lookup to find key, or which bucket it would be in.
      // For a container smaller than the small size threshold use a linear
      // search through the whole container, just testing for equality.
      // Otherwise, calculate the hash code and bucket index for the key,
      // and search in that bucket.
      // The return value will have a pointer to the node _before_ the first
      // node matching the key, if any such node exists. Returning the node
      // before the desired one allows the result to be used for erasure.
      // If no matching element is present, the hash code and bucket for the
      // key will be set, allowing a new node to be inserted at that location.
      // (The hash code and bucket might also be set when a node is found.)
      // The _M_before pointer might point to _M_before_begin, so must not be
      // cast to __node_ptr, and it must not be used to modify *_M_before
      // except in non-const member functions, such as erase.

      __location_type
      _M_locate(const key_type& __k) const
      { return _M_locate_tr<key_type>(__k); }

      // Heterogeneous-key version; defined out of line.
      template <typename _Kt>
	__location_type
	_M_locate_tr(const _Kt& __k) const;
915
916 __node_ptr
917 _M_find_node(size_type __bkt, const key_type& __key,
918 __hash_code __c) const
919 {
920 if (__node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c))
921 return static_cast<__node_ptr>(__before_n->_M_nxt);
922 return nullptr;
923 }
924
925 template<typename _Kt>
926 __node_ptr
927 _M_find_node_tr(size_type __bkt, const _Kt& __key,
928 __hash_code __c) const
929 {
930 if (auto __before_n = _M_find_before_node_tr(__bkt, __key, __c))
931 return static_cast<__node_ptr>(__before_n->_M_nxt);
932 return nullptr;
933 }
934
      // Insert a node at the beginning of a bucket.
      void
      _M_insert_bucket_begin(size_type __bkt, __node_ptr __node)
      {
	if (_M_buckets[__bkt])
	  {
	    // Bucket is not empty, we just need to insert the new node
	    // after the bucket before begin.
	    __node->_M_nxt = _M_buckets[__bkt]->_M_nxt;
	    _M_buckets[__bkt]->_M_nxt = __node;
	  }
	else
	  {
	    // The bucket is empty, the new node is inserted at the
	    // beginning of the singly-linked list and the bucket will
	    // contain _M_before_begin pointer.
	    __node->_M_nxt = _M_before_begin._M_nxt;
	    _M_before_begin._M_nxt = __node;

	    if (__node->_M_nxt)
	      // We must update former begin bucket that is pointing to
	      // _M_before_begin.
	      _M_buckets[_M_bucket_index(*__node->_M_next())] = __node;

	    _M_buckets[__bkt] = &_M_before_begin;
	  }
      }
962
963 // Remove the bucket first node
964 void
965 _M_remove_bucket_begin(size_type __bkt, __node_ptr __next_n,
966 size_type __next_bkt)
967 {
968 if (!__next_n)
969 _M_buckets[__bkt] = nullptr;
970 else if (__next_bkt != __bkt)
971 {
972 _M_buckets[__next_bkt] = _M_buckets[__bkt];
973 _M_buckets[__bkt] = nullptr;
974 }
975 }
976
977      // Get the node before __n in the bucket __bkt
978      __node_base_ptr
979      _M_get_previous_node(size_type __bkt, __node_ptr __n);
980
      // Compute the hash code of __k, possibly reusing the cached code
      // of __hint's node; returns the insertion hint actually usable.
981      pair<__node_ptr, __hash_code>
982      _M_compute_hash_code(__node_ptr __hint, const key_type& __k) const;
983
984      // Insert node __n with hash code __code, in bucket __bkt (or another
985      // bucket if rehashing is needed).
986      // Assumes no element with equivalent key is already present.
987      // Takes ownership of __n if insertion succeeds, throws otherwise.
988      // __n_elt is an estimated number of elements we expect to insert,
989      // used as a hint for rehashing when inserting a range.
990      iterator
991      _M_insert_unique_node(size_type __bkt, __hash_code,
992			    __node_ptr __n, size_type __n_elt = 1);
993
994      // Insert node __n with key __k and hash code __code.
995      // Takes ownership of __n if insertion succeeds, throws otherwise.
996      iterator
997      _M_insert_multi_node(__node_ptr __hint,
998			   __hash_code __code, __node_ptr __n);
999
      // NOTE(review): the return type line (original line 1001, presumably
      // pair<iterator, bool>) is missing from this listing — confirm
      // against the upstream header.
1000      template<typename... _Args>
1002	_M_emplace_uniq(_Args&&... __args);
1003
1004#pragma GCC diagnostic push
1005#pragma GCC diagnostic ignored "-Wc++14-extensions" // variable templates
      // __is_key_type<_Arg>: true when a single emplace argument is
      // already the key (set case), or a pair whose first member is the
      // key (map case), enabling the no-allocation lookup fast path.
1006      template<typename _Arg, typename _DArg = __remove_cvref_t<_Arg>,
1007	       typename = _ExtractKey>
1008	static constexpr bool __is_key_type = false;
1009
1010      template<typename _Arg>
1011	static constexpr bool
1012	__is_key_type<_Arg, key_type, __detail::_Identity> = true;
1013
1014      template<typename _Arg, typename _Arg1, typename _Arg2>
1015	static constexpr bool
1016	__is_key_type<_Arg, pair<_Arg1, _Arg2>, __detail::_Select1st>
1017	  = is_same<__remove_cvref_t<_Arg1>, key_type>::value;
1018#pragma GCC diagnostic pop
1019
1020 template<typename... _Args>
1021 iterator
1022 _M_emplace_multi(const_iterator, _Args&&... __args);
1023
1024 iterator
1025 _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);
1026
1027 size_type
1028 _M_erase_some(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);
1029
1030 template<typename _InputIterator>
1031 void
1032 _M_insert_range_multi(_InputIterator __first, _InputIterator __last);
1033
1034 public:
1035#pragma GCC diagnostic push
1036#pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
1037 // Emplace
1038 template<typename... _Args>
1039 __ireturn_type
1040 emplace(_Args&&... __args)
1041 {
1042 if constexpr (__unique_keys::value)
1043 return _M_emplace_uniq(std::forward<_Args>(__args)...);
1044 else
1045 return _M_emplace_multi(cend(), std::forward<_Args>(__args)...);
1046 }
1047
1048 template<typename... _Args>
1049 iterator
1050 emplace_hint(const_iterator __hint, _Args&&... __args)
1051 {
1052 if constexpr (__unique_keys::value)
1053 return _M_emplace_uniq(std::forward<_Args>(__args)...).first;
1054 else
1055 return _M_emplace_multi(__hint, std::forward<_Args>(__args)...);
1056 }
1057
1058 // Insert
1059 __ireturn_type
1060 insert(const value_type& __v)
1061 {
1062 if constexpr (__unique_keys::value)
1063 return _M_emplace_uniq(__v);
1064 else
1065 return _M_emplace_multi(cend(), __v);
1066 }
1067
1068 iterator
1069 insert(const_iterator __hint, const value_type& __v)
1070 {
1071 if constexpr (__unique_keys::value)
1072 return _M_emplace_uniq(__v).first;
1073 else
1074 return _M_emplace_multi(__hint, __v);
1075 }
1076
1077 __ireturn_type
1078 insert(value_type&& __v)
1079 {
1080 if constexpr (__unique_keys::value)
1081 return _M_emplace_uniq(std::move(__v));
1082 else
1083 return _M_emplace_multi(cend(), std::move(__v));
1084 }
1085
1086 iterator
1087 insert(const_iterator __hint, value_type&& __v)
1088 {
1089 if constexpr (__unique_keys::value)
1090 return _M_emplace_uniq(std::move(__v)).first;
1091 else
1092 return _M_emplace_multi(__hint, std::move(__v));
1093 }
1094
1095#ifdef __glibcxx_unordered_map_try_emplace // C++ >= 17 && HOSTED
      // try_emplace: locate first; only if the key is absent is a node
      // built from __k and __args (the mapped type is never constructed
      // for an existing key).  On the not-found path _M_locate has
      // already computed the hash code and bucket, so they are reused.
      // NOTE(review): original lines 1097 (return type) and 1112-1114
      // (_Scoped_node constructor arguments) are missing from this
      // listing — confirm against the upstream header.
1096      template<typename _KType, typename... _Args>
1098	try_emplace(const_iterator, _KType&& __k, _Args&&... __args)
1099	{
1100	  __hash_code __code;
1101	  size_type __bkt;
1102	  if (auto __loc = _M_locate(__k))
1103	    return { iterator(__loc), false };
1104	  else
1105	    {
1106	      __code = __loc._M_hash_code;
1107	      __bkt = __loc._M_bucket_index;
1108	    }
1109
1110	  _Scoped_node __node {
1111	    this,
1115	  };
1116	  auto __it = _M_insert_unique_node(__bkt, __code, __node._M_node);
	  // Ownership passed to the container; disarm the scoped guard.
1117	  __node._M_node = nullptr;
1118	  return { __it, true };
1119	}
1120#endif
1121
1122      void
1123      insert(initializer_list<value_type> __l)
1124      { this->insert(__l.begin(), __l.end()); }
1125
      // Range insert: element-wise emplace for unique keys, bulk helper
      // (which pre-reserves) for equivalent keys.
1126      template<typename _InputIterator>
1127	void
1128	insert(_InputIterator __first, _InputIterator __last)
1129	{
1130	  if constexpr (__unique_keys::value)
1131	    for (; __first != __last; ++__first)
1132	      _M_emplace_uniq(*__first);
1133	  else
1134	    return _M_insert_range_multi(__first, __last);
1135	}
1136
1137      // This overload is only defined for maps, not sets.
      // Accepts anything convertible to value_type (e.g. pair<K, V>&&).
1138      template<typename _Pair,
1139	       typename = _Require<__not_<is_same<_Key, _Value>>,
1140				   is_constructible<value_type, _Pair&&>>>
1141	__ireturn_type
1142	insert(_Pair&& __v)
1143	{
1144	  if constexpr (__unique_keys::value)
1145	    return _M_emplace_uniq(std::forward<_Pair>(__v));
1146	  else
1147	    return _M_emplace_multi(cend(), std::forward<_Pair>(__v));
1148	}
1149
1150      // This overload is only defined for maps, not sets.
      // Hinted convertible-pair insert.  Returns a plain iterator, so
      // the unique-keys branch must extract .first from the
      // pair<iterator, bool> produced by _M_emplace_uniq — matching the
      // other hinted overloads (emplace_hint, insert(hint, value_type)).
      // Without .first the branch tries to convert the pair to
      // iterator, which is ill-formed when instantiated.
1151      template<typename _Pair,
1152	       typename = _Require<__not_<is_same<_Key, _Value>>,
1153				   is_constructible<value_type, _Pair&&>>>
1154	iterator
1155	insert(const_iterator __hint, _Pair&& __v)
1156	{
1157	  if constexpr (__unique_keys::value)
1158	    return _M_emplace_uniq(std::forward<_Pair>(__v)).first;
1159	  else
1160	    return _M_emplace_multi(__hint, std::forward<_Pair>(__v));
1161	}
1162#pragma GCC diagnostic pop
1163
1164      // Erase
1165      iterator
1166      erase(const_iterator);
1167
1168      // _GLIBCXX_RESOLVE_LIB_DEFECTS
1169      // 2059. C++0x ambiguity problem with map::erase
      // Mutable-iterator overload simply delegates to the const one.
1170      iterator
1171      erase(iterator __it)
1172      { return erase(const_iterator(__it)); }
1173
      // Erase all elements with key __k; returns the number removed.
1174      size_type
1175      erase(const key_type& __k);
1176
      // Heterogeneous-key erase, for transparent comparators.
1177      template <typename _Kt>
1178	size_type
1179	_M_erase_tr(const _Kt& __k);
1180
1181      iterator
1182      erase(const_iterator, const_iterator);
1183
1184      void
1185      clear() noexcept;
1186
1187      // Set number of buckets keeping it appropriate for container's number
1188      // of elements.
1189      void rehash(size_type __bkt_count);
1190
1191      // DR 1189.
1192      // reserve, if present, comes from _Rehash_base.
1193
1194#if __glibcxx_node_extract // >= C++17 && HOSTED
1195      /// Re-insert an extracted node into a container with unique keys.
      // If an equivalent key exists the node handle is returned to the
      // caller inside insert_return_type; otherwise _M_locate's cached
      // hash code and bucket are reused for the insertion.
1196      insert_return_type
1197      _M_reinsert_node(node_type&& __nh)
1198      {
1199	insert_return_type __ret;
1200	if (__nh.empty())
1201	  __ret.position = end();
1202	else
1203	  {
1204	    __glibcxx_assert(get_allocator() == __nh.get_allocator());
1205
1206	    if (auto __loc = _M_locate(__nh._M_key()))
1207	      {
1208		__ret.node = std::move(__nh);
1209		__ret.position = iterator(__loc);
1210		__ret.inserted = false;
1211	      }
1212	    else
1213	      {
1214		auto __code = __loc._M_hash_code;
1215		auto __bkt = __loc._M_bucket_index;
1216		__ret.position
1217		  = _M_insert_unique_node(__bkt, __code, __nh._M_ptr);
1218		__ret.inserted = true;
		// Node now owned by the container.
1219		__nh.release();
1220	      }
1221	  }
1222	return __ret;
1223      }
1224
1225      /// Re-insert an extracted node into a container with equivalent keys.
1226      iterator
1227      _M_reinsert_node_multi(const_iterator __hint, node_type&& __nh)
1228      {
1229	if (__nh.empty())
1230	  return end();
1231
1232	__glibcxx_assert(get_allocator() == __nh.get_allocator());
1233
1234	const key_type& __k = __nh._M_key();
1235	auto __code = this->_M_hash_code(__k);
1236	auto __ret
1237	  = _M_insert_multi_node(__hint._M_cur, __code, __nh._M_ptr);
1238	__nh.release();
1239	return __ret;
1240      }
1241
1242    private:
      // Unlink the node after __prev_n from bucket __bkt and wrap it in
      // a node handle.  Fixes up bucket before-begin pointers whether
      // the node was first in its bucket or the last before another
      // bucket's range.
1243      node_type
1244      _M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
1245      {
1246	__node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
1247	if (__prev_n == _M_buckets[__bkt])
1248	  _M_remove_bucket_begin(__bkt, __n->_M_next(),
1249	     __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
1250	else if (__n->_M_nxt)
1251	  {
1252	    size_type __next_bkt = _M_bucket_index(*__n->_M_next());
1253	    if (__next_bkt != __bkt)
1254	      _M_buckets[__next_bkt] = __prev_n;
1255	  }
1256
1257	__prev_n->_M_nxt = __n->_M_nxt;
1258	__n->_M_nxt = nullptr;
1259	--_M_element_count;
1260	return { __n, this->_M_node_allocator() };
1261      }
1262
1263      // Hash code for node __src_n with key __k, using this->hash_function().
1264      // Will use a hash code cached in the node if safe to do so. This is
1265      // for use in _M_merge_multi where the node comes from another container
1266      // with a hash function that might not match this->hash_function().
1267      template<typename _H2>
1268	__hash_code
1269	_M_src_hash_code(const _H2&, const __node_value_type& __src_n) const
1270	{
	  // Safe only when both containers use the same stateless hasher.
1271	  if constexpr (__and_<__hash_cached,
1272			       is_same<_H2, _Hash>, is_empty<_Hash>>::value)
1273	    // If the node has a cached hash code, it's OK to use it.
1274	    return __src_n._M_hash_code;
1275	  else
1276	    return this->_M_hash_code(_ExtractKey{}(__src_n._M_v()));
1277	}
1278
1279    public:
1280      // Extract a node.
1281      node_type
1282      extract(const_iterator __pos)
1283      {
1284	size_t __bkt = _M_bucket_index(*__pos._M_cur);
1285	return _M_extract_node(__bkt,
1286			       _M_get_previous_node(__bkt, __pos._M_cur));
1287      }
1288
1289      /// Extract a node.
1290      node_type
1291      extract(const _Key& __k)
1292      { return _M_extract_tr<_Key>(__k); }
1293
      // Heterogeneous-key extract; returns an empty handle if absent.
1294      template <typename _Kt>
1295	node_type
1296	_M_extract_tr(const _Kt& __k)
1297	{
1298	  node_type __nh;
1299	  __hash_code __code = this->_M_hash_code_tr(__k);
1300	  std::size_t __bkt = _M_bucket_index(__code);
1301	  if (__node_base_ptr __prev_node =
1302	      _M_find_before_node_tr(__bkt, __k, __code))
1303	    __nh = _M_extract_node(__bkt, __prev_node);
1304	  return __nh;
1305	}
1306
1307      /// Merge from another container of the same type.
      // Walks __src's list via before-pointers so nodes can be spliced
      // out without re-finding the predecessor.  Nodes whose key is
      // already present stay in __src.  __first * __n_elt + 1 passes a
      // sizing hint only for the first successful insertion.
1308      void
1309      _M_merge_unique(_Hashtable& __src)
1310      {
1311	__glibcxx_assert(get_allocator() == __src.get_allocator());
1312
1313	using _PTr = pointer_traits<__node_base_ptr>;
1314
1315	auto __n_elt = __src.size();
1316	size_type __first = 1;
1317	// For a container of identical type we can use its private members,
1318	// __src._M_before_begin, __src._M_bucket_index etc.
1319	auto __prev = _PTr::pointer_to(__src._M_before_begin);
1320	while (__n_elt--)
1321	  {
1322	    const auto __next = __prev->_M_nxt;
1323	    const auto& __node = static_cast<__node_type&>(*__next);
1324	    const key_type& __k = _ExtractKey{}(__node._M_v());
1325	    const auto __loc = _M_locate(__k);
1326	    if (__loc)
1327	      {
		// Key already present: skip over this node in __src.
1328		__prev = __next;
1329		continue;
1330	      }
1331
	    // Not found: __loc still carries the hash code and bucket
	    // computed by _M_locate, reused for the insertion below.
1332	    auto __src_bkt = __src._M_bucket_index(__node);
1333	    auto __nh = __src._M_extract_node(__src_bkt, __prev);
1334	    _M_insert_unique_node(__loc._M_bucket_index, __loc._M_hash_code,
1335				  __nh._M_ptr, __first * __n_elt + 1);
1336	    __nh.release();
1337	    __first = 0;
1338	  }
1339      }
1340
1341      /// Merge from a compatible container into one with unique keys.
      // Same algorithm as above, but restricted to the public API of a
      // container that differs only in hash/equality functors.
1342      template<typename _Compatible_Hashtable>
1343	void
1344	_M_merge_unique(_Compatible_Hashtable& __src)
1345	{
1346	  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
1347		    node_type>, "Node types are compatible");
1348	  __glibcxx_assert(get_allocator() == __src.get_allocator());
1349
1350	  auto __n_elt = __src.size();
1351	  size_type __first = 1;
1352	  // For a compatible container we can only use the public API,
1353	  // so cbegin(), cend(), hash_function(), and extract(iterator).
1354	  for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
1355	    {
1356	      --__n_elt;
	      // Advance before extracting: extract invalidates __pos.
1357	      auto __pos = __i++;
1358	      const key_type& __k = _ExtractKey{}(*__pos);
1359	      const auto __loc = _M_locate(__k);
1360	      if (__loc)
1361		continue;
1362
1363	      auto __nh = __src.extract(__pos);
1364	      _M_insert_unique_node(__loc._M_bucket_index,
1365				    __loc._M_hash_code, __nh._M_ptr,
1366				    __first * __n_elt + 1);
1367	      __nh.release();
1368	      __first = 0;
1369	    }
1370	}
1371
1372      /// Merge from another container of the same type.
      // Every node moves, so reserve up front, then splice nodes one by
      // one; each insertion's iterator becomes the hint for the next.
1373      void
1374      _M_merge_multi(_Hashtable& __src)
1375      {
1376	__glibcxx_assert(get_allocator() == __src.get_allocator());
1377
1378	if (__src.size() == 0) [[__unlikely__]]
1379	  return;
1380
1381	using _PTr = pointer_traits<__node_base_ptr>;
1382
1383	__node_ptr __hint = nullptr;
1384	this->reserve(size() + __src.size());
1385	// For a container of identical type we can use its private members,
1386	// __src._M_before_begin, __src._M_bucket_index etc.
1387	auto __prev = _PTr::pointer_to(__src._M_before_begin);
1388	do
1389	  {
1390	    const auto& __node = static_cast<__node_type&>(*__prev->_M_nxt);
1391	    // Hash code from this:
1392	    auto __code = _M_hash_code_ext(__node);
1393	    // Bucket index in __src, using code from __src.hash_function():
1394	    size_type __src_bkt = __src._M_bucket_index(__node);
1395	    auto __nh = __src._M_extract_node(__src_bkt, __prev);
1396	    __hint = _M_insert_multi_node(__hint, __code, __nh._M_ptr)._M_cur;
1397	    __nh.release();
1398	  }
	// __prev stays at __src's before-begin; extraction relinks its
	// _M_nxt, so this loops until __src is empty.
1399	while (__prev->_M_nxt != nullptr);
1400      }
1401
1402      /// Merge from a compatible container into one with equivalent keys.
1403      template<typename _Compatible_Hashtable>
1404	void
1405	_M_merge_multi(_Compatible_Hashtable& __src)
1406	{
1407	  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
1408		    node_type>, "Node types are compatible");
1409	  __glibcxx_assert(get_allocator() == __src.get_allocator());
1410
1411	  __node_ptr __hint = nullptr;
1412	  this->reserve(size() + __src.size());
1413	  // For a compatible container we can only use the public API,
1414	  // so cbegin(), cend(), hash_function(), and extract(iterator).
1415	  for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
1416	    {
1417	      auto __pos = __i++;
	      // May reuse the source node's cached hash code when safe;
	      // see _M_src_hash_code.
1418	      __hash_code __code
1419		= _M_src_hash_code(__src.hash_function(), *__pos._M_cur);
1420	      auto __nh = __src.extract(__pos);
1421	      __hint = _M_insert_multi_node(__hint, __code, __nh._M_ptr)._M_cur;
1422	      __nh.release();
1423	    }
1424	}
1425#endif // C++17 __glibcxx_node_extract
1425#endif // C++17 __glibcxx_node_extract
1426
      // Element-wise equality with another hashtable; used by the
      // container operator== implementations.
1427      bool
1428      _M_equal(const _Hashtable& __other) const;
1429
1430    private:
1431      // Helper rehash method used when keys are unique.
1432      void _M_rehash(size_type __bkt_count, true_type __uks);
1433
1434      // Helper rehash method used when keys can be non-unique.
1435      void _M_rehash(size_type __bkt_count, false_type __uks);
1436 };
1437
1438 // Definitions of class template _Hashtable's out-of-line member functions.
1439 template<typename _Key, typename _Value, typename _Alloc,
1440 typename _ExtractKey, typename _Equal,
1441 typename _Hash, typename _RangeHash, typename _Unused,
1442 typename _RehashPolicy, typename _Traits>
1443 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1444 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1445 _Hashtable(size_type __bkt_count_hint,
1446 const _Hash& __h, const _Equal& __eq, const allocator_type& __a)
1447 : _Hashtable(__h, __eq, __a)
1448 {
1449 auto __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count_hint);
1450 if (__bkt_count > _M_bucket_count)
1451 {
1452 _M_buckets = _M_allocate_buckets(__bkt_count);
1453 _M_bucket_count = __bkt_count;
1454 }
1455 }
1456
1457 template<typename _Key, typename _Value, typename _Alloc,
1458 typename _ExtractKey, typename _Equal,
1459 typename _Hash, typename _RangeHash, typename _Unused,
1460 typename _RehashPolicy, typename _Traits>
1461 template<typename _InputIterator>
1462 inline
1463 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1464 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1465 _Hashtable(_InputIterator __f, _InputIterator __l,
1466 size_type __bkt_count_hint,
1467 const _Hash& __h, const _Equal& __eq,
1468 const allocator_type& __a, true_type /* __uks */)
1469 : _Hashtable(__bkt_count_hint, __h, __eq, __a)
1470 { this->insert(__f, __l); }
1471
  // Range constructor, equivalent keys: every element will be kept, so
  // size the bucket array from the (forward-iterator) range length
  // before inserting.
1472  template<typename _Key, typename _Value, typename _Alloc,
1473	   typename _ExtractKey, typename _Equal,
1474	   typename _Hash, typename _RangeHash, typename _Unused,
1475	   typename _RehashPolicy, typename _Traits>
1476    template<typename _InputIterator>
1477      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1478		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1479      _Hashtable(_InputIterator __f, _InputIterator __l,
1480		 size_type __bkt_count_hint,
1481		 const _Hash& __h, const _Equal& __eq,
1482		 const allocator_type& __a, false_type __uks)
1483      : _Hashtable(__h, __eq, __a)
1484      {
1485	auto __nb_elems = __detail::__distance_fw(__f, __l);
1486	auto __bkt_count =
1487	  _M_rehash_policy._M_next_bkt(
1488	    std::max(_M_rehash_policy._M_bkt_for_elements(__nb_elems),
1489		     __bkt_count_hint));
1490
1491	if (__bkt_count > _M_bucket_count)
1492	  {
1493	    _M_buckets = _M_allocate_buckets(__bkt_count);
1494	    _M_bucket_count = __bkt_count;
1495	  }
1496
1497	for (; __f != __l; ++__f)
1498	  _M_emplace_multi(cend(), *__f);
1499      }
1500
  // Copy assignment.  When POCCA forces a switch to a non-equal
  // allocator, all existing storage must be freed with the old
  // allocator before the allocator is replaced; otherwise buckets and
  // nodes are reused via _M_assign_elements.
1501  template<typename _Key, typename _Value, typename _Alloc,
1502	   typename _ExtractKey, typename _Equal,
1503	   typename _Hash, typename _RangeHash, typename _Unused,
1504	   typename _RehashPolicy, typename _Traits>
1505    auto
1506    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1507	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1508    operator=(const _Hashtable& __ht)
1509    -> _Hashtable&
1510    {
1511      if (&__ht == this)
1512	return *this;
1513
1514      if (__node_alloc_traits::_S_propagate_on_copy_assign())
1515	{
1516	  auto& __this_alloc = this->_M_node_allocator();
1517	  auto& __that_alloc = __ht._M_node_allocator();
1518	  if (!__node_alloc_traits::_S_always_equal()
1519	      && __this_alloc != __that_alloc)
1520	    {
1521	      // Replacement allocator cannot free existing storage.
1522	      this->_M_deallocate_nodes(_M_begin());
1523	      _M_before_begin._M_nxt = nullptr;
1524	      _M_deallocate_buckets();
1525	      _M_buckets = nullptr;
1526	      std::__alloc_on_copy(__this_alloc, __that_alloc);
1527	      __hashtable_base::operator=(__ht);
1528	      _M_bucket_count = __ht._M_bucket_count;
1529	      _M_element_count = __ht._M_element_count;
1530	      _M_rehash_policy = __ht._M_rehash_policy;
1531
1532	      struct _Guard
1533	      {
1534		~_Guard() { if (_M_ht) _M_ht->_M_reset(); }
1535		_Hashtable* _M_ht;
1536	      };
1537	      // If _M_assign exits via an exception it will have deallocated
1538	      // all memory. This guard will ensure *this is in a usable state.
1539	      _Guard __guard{this};
1540	      _M_assign(__ht);
1541	      __guard._M_ht = nullptr;
1542	      return *this;
1543	    }
1544	  std::__alloc_on_copy(__this_alloc, __that_alloc);
1545	}
1546
1547      // Reuse allocated buckets and nodes.
1548      _M_assign_elements(__ht);
1549      return *this;
1550    }
1551
  // Copy or move __ht's elements into *this, reusing existing nodes
  // where possible (_ReuseOrAllocNode).  On exception the previous
  // bucket array is restored (but the old elements are already gone,
  // so the buckets are cleared to empty).
1552  template<typename _Key, typename _Value, typename _Alloc,
1553	   typename _ExtractKey, typename _Equal,
1554	   typename _Hash, typename _RangeHash, typename _Unused,
1555	   typename _RehashPolicy, typename _Traits>
1556    template<typename _Ht>
1557      void
1558      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1559		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1560      _M_assign_elements(_Ht&& __ht)
1561      {
1562	using __reuse_or_alloc_node_gen_t =
1563	  __detail::_ReuseOrAllocNode<__node_alloc_type>;
1564
1565	__buckets_ptr __former_buckets = nullptr;
1566	std::size_t __former_bucket_count = _M_bucket_count;
	// Restores the rehash policy state unless disarmed below.
1567	__rehash_guard_t __rehash_guard(_M_rehash_policy);
1568
1569	if (_M_bucket_count != __ht._M_bucket_count)
1570	  {
1571	    __former_buckets = _M_buckets;
1572	    _M_buckets = _M_allocate_buckets(__ht._M_bucket_count);
1573	    _M_bucket_count = __ht._M_bucket_count;
1574	  }
1575	else
1576	  std::fill_n(_M_buckets, _M_bucket_count, nullptr);
1577
1578	__try
1579	  {
1580	    __hashtable_base::operator=(std::forward<_Ht>(__ht));
1581	    _M_element_count = __ht._M_element_count;
1582	    _M_rehash_policy = __ht._M_rehash_policy;
1583	    __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
1584	    _M_before_begin._M_nxt = nullptr;
1585	    _M_assign(std::forward<_Ht>(__ht), __roan);
1586	    if (__former_buckets)
1587	      _M_deallocate_buckets(__former_buckets, __former_bucket_count);
1588	    __rehash_guard._M_guarded_obj = nullptr;
1589	  }
1590	__catch(...)
1591	  {
1592	    if (__former_buckets)
1593	      {
1594		// Restore previous buckets.
1595		_M_deallocate_buckets();
1596		_M_buckets = __former_buckets;
1597		_M_bucket_count = __former_bucket_count;
1598	      }
1599	    std::fill_n(_M_buckets, _M_bucket_count, nullptr);
1600	    __throw_exception_again;
1601	  }
1602      }
1603
  // Rebuild *this as a copy (or move, if _Ht is an rvalue) of __ht's
  // element chain, assuming *this is currently empty.  The _Guard
  // clears any partially-built state if a node construction throws.
1604  template<typename _Key, typename _Value, typename _Alloc,
1605	   typename _ExtractKey, typename _Equal,
1606	   typename _Hash, typename _RangeHash, typename _Unused,
1607	   typename _RehashPolicy, typename _Traits>
1608    template<typename _Ht, typename _NodeGenerator>
1609      void
1610      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1611		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1612      _M_assign(_Ht&& __ht, _NodeGenerator& __node_gen)
1613      {
1614	struct _Guard
1615	{
1616	  ~_Guard()
1617	  {
1618	    if (_M_ht)
1619	      {
1620		_M_ht->clear();
1621		if (_M_dealloc_buckets)
1622		  _M_ht->_M_deallocate_buckets();
1623	      }
1624	  }
1625	  _Hashtable* _M_ht = nullptr;
1626	  bool _M_dealloc_buckets = false;
1627	};
1628	_Guard __guard;
1629
1630	if (!_M_buckets)
1631	  {
1632	    _M_buckets = _M_allocate_buckets(_M_bucket_count);
1633	    __guard._M_dealloc_buckets = true;
1634	  }
1635
1636	if (!__ht._M_before_begin._M_nxt)
1637	  return;
1638
1639	__guard._M_ht = this;
1640
	// Move elements out of __ht only when it was passed by rvalue.
1641	using _FromVal = __conditional_t<is_lvalue_reference<_Ht>::value,
1642					 const value_type&, value_type&&>;
1643
1644	// First deal with the special first node pointed to by
1645	// _M_before_begin.
1646	__node_ptr __ht_n = __ht._M_begin();
1647	__node_ptr __this_n
1648	  = __node_gen(static_cast<_FromVal>(__ht_n->_M_v()));
1649	_M_copy_code(*__this_n, *__ht_n);
1650	_M_update_bbegin(__this_n);
1651
1652	// Then deal with other nodes.
	// Source and destination have the same bucket count, so nodes
	// land in the same buckets and no hashing is repeated.
1653	__node_ptr __prev_n = __this_n;
1654	for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
1655	  {
1656	    __this_n = __node_gen(static_cast<_FromVal>(__ht_n->_M_v()));
1657	    __prev_n->_M_nxt = __this_n;
1658	    _M_copy_code(*__this_n, *__ht_n);
1659	    size_type __bkt = _M_bucket_index(*__this_n);
1660	    if (!_M_buckets[__bkt])
1661	      _M_buckets[__bkt] = __prev_n;
1662	    __prev_n = __this_n;
1663	  }
	// Success: disarm the cleanup guard.
1664	__guard._M_ht = nullptr;
1665      }
1666
  // Return *this to the freshly-constructed empty state (single
  // in-object bucket, no elements).  Does not free anything; callers
  // have already transferred or released the storage.
1667  template<typename _Key, typename _Value, typename _Alloc,
1668	   typename _ExtractKey, typename _Equal,
1669	   typename _Hash, typename _RangeHash, typename _Unused,
1670	   typename _RehashPolicy, typename _Traits>
1671    void
1672    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1673	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1674    _M_reset() noexcept
1675    {
1676      _M_rehash_policy._M_reset();
1677      _M_bucket_count = 1;
1678      _M_single_bucket = nullptr;
1679      _M_buckets = &_M_single_bucket;
1680      _M_before_begin._M_nxt = nullptr;
1681      _M_element_count = 0;
1682    }
1683
  // Move assignment when the allocator can be moved or compares equal:
  // steal __ht's storage wholesale, then reset __ht to empty.
1684  template<typename _Key, typename _Value, typename _Alloc,
1685	   typename _ExtractKey, typename _Equal,
1686	   typename _Hash, typename _RangeHash, typename _Unused,
1687	   typename _RehashPolicy, typename _Traits>
1688    void
1689    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1690	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1691    _M_move_assign(_Hashtable&& __ht, true_type)
1692    {
1693      if (__builtin_expect(std::__addressof(__ht) == this, false))
1694	return;
1695
1696      this->_M_deallocate_nodes(_M_begin());
1697      _M_deallocate_buckets();
1698      __hashtable_base::operator=(std::move(__ht));
1699      _M_rehash_policy = __ht._M_rehash_policy;
      // The single in-object bucket can't be stolen by pointer;
      // copy its value and point at our own.
1700      if (!__ht._M_uses_single_bucket())
1701	_M_buckets = __ht._M_buckets;
1702      else
1703	{
1704	  _M_buckets = &_M_single_bucket;
1705	  _M_single_bucket = __ht._M_single_bucket;
1706	}
1707
1708      _M_bucket_count = __ht._M_bucket_count;
1709      _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
1710      _M_element_count = __ht._M_element_count;
1711      std::__alloc_on_move(this->_M_node_allocator(), __ht._M_node_allocator());
1712
1713      // Fix bucket containing the _M_before_begin pointer that can't be moved.
1714      _M_update_bbegin();
1715      __ht._M_reset();
1716    }
1717
  // Move assignment when the allocator does not propagate: storage can
  // only be stolen if the allocators happen to compare equal, otherwise
  // fall back to element-wise move.
1718  template<typename _Key, typename _Value, typename _Alloc,
1719	   typename _ExtractKey, typename _Equal,
1720	   typename _Hash, typename _RangeHash, typename _Unused,
1721	   typename _RehashPolicy, typename _Traits>
1722    void
1723    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1724	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1725    _M_move_assign(_Hashtable&& __ht, false_type)
1726    {
1727      if (__ht._M_node_allocator() == this->_M_node_allocator())
1728	_M_move_assign(std::move(__ht), true_type{});
1729      else
1730	{
1731	  // Can't move memory, move elements then.
1732	  _M_assign_elements(std::move(__ht));
1733	  __ht.clear();
1734	}
1735    }
1736
  // Copy constructor: allocator chosen via select_on_container_copy_
  // construction; elements deep-copied by _M_assign.
1737  template<typename _Key, typename _Value, typename _Alloc,
1738	   typename _ExtractKey, typename _Equal,
1739	   typename _Hash, typename _RangeHash, typename _Unused,
1740	   typename _RehashPolicy, typename _Traits>
1741    inline
1742    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1743	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1744    _Hashtable(const _Hashtable& __ht)
1745    : __hashtable_base(__ht),
1746      __map_base(__ht),
1747      __rehash_base(__ht),
1748      __hashtable_alloc(
1749	__node_alloc_traits::_S_select_on_copy(__ht._M_node_allocator())),
1750      __enable_default_ctor(__ht),
1751      _M_buckets(nullptr),
1752      _M_bucket_count(__ht._M_bucket_count),
1753      _M_element_count(__ht._M_element_count),
1754      _M_rehash_policy(__ht._M_rehash_policy)
1755    {
1756      _M_assign(__ht);
1757    }
1758
  // Allocator-extended move constructor, always-equal allocators:
  // steal __ht's storage directly (same caveats as _M_move_assign for
  // the in-object single bucket and before-begin pointer).
1759  template<typename _Key, typename _Value, typename _Alloc,
1760	   typename _ExtractKey, typename _Equal,
1761	   typename _Hash, typename _RangeHash, typename _Unused,
1762	   typename _RehashPolicy, typename _Traits>
1763    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1764	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1765    _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
1766	       true_type /* alloc always equal */)
1767    noexcept(_S_nothrow_move())
1768    : __hashtable_base(__ht),
1769      __map_base(__ht),
1770      __rehash_base(__ht),
1771      __hashtable_alloc(std::move(__a)),
1772      __enable_default_ctor(__ht),
1773      _M_buckets(__ht._M_buckets),
1774      _M_bucket_count(__ht._M_bucket_count),
1775      _M_before_begin(__ht._M_before_begin._M_nxt),
1776      _M_element_count(__ht._M_element_count),
1777      _M_rehash_policy(__ht._M_rehash_policy)
1778    {
1779      // Update buckets if __ht is using its single bucket.
1780      if (__ht._M_uses_single_bucket())
1781	{
1782	  _M_buckets = &_M_single_bucket;
1783	  _M_single_bucket = __ht._M_single_bucket;
1784	}
1785
1786      // Fix bucket containing the _M_before_begin pointer that can't be moved.
1787      _M_update_bbegin();
1788
1789      __ht._M_reset();
1790    }
1791
  // Allocator-extended copy constructor: uses the given allocator
  // rather than selecting from __ht's.
1792  template<typename _Key, typename _Value, typename _Alloc,
1793	   typename _ExtractKey, typename _Equal,
1794	   typename _Hash, typename _RangeHash, typename _Unused,
1795	   typename _RehashPolicy, typename _Traits>
1796    inline
1797    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1798	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1799    _Hashtable(const _Hashtable& __ht, const allocator_type& __a)
1800    : __hashtable_base(__ht),
1801      __map_base(__ht),
1802      __rehash_base(__ht),
1803      __hashtable_alloc(__node_alloc_type(__a)),
1804      __enable_default_ctor(__ht),
1805      _M_buckets(),
1806      _M_bucket_count(__ht._M_bucket_count),
1807      _M_element_count(__ht._M_element_count),
1808      _M_rehash_policy(__ht._M_rehash_policy)
1809    {
1810      _M_assign(__ht);
1811    }
1812
  // Allocator-extended move constructor, possibly-unequal allocators:
  // steal storage only when the allocators compare equal at runtime;
  // otherwise element-wise move (or copy, if moving can throw).
1813  template<typename _Key, typename _Value, typename _Alloc,
1814	   typename _ExtractKey, typename _Equal,
1815	   typename _Hash, typename _RangeHash, typename _Unused,
1816	   typename _RehashPolicy, typename _Traits>
1817    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1818	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1819    _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
1820	       false_type /* alloc always equal */)
1821    : __hashtable_base(__ht),
1822      __map_base(__ht),
1823      __rehash_base(__ht),
1824      __hashtable_alloc(std::move(__a)),
1825      __enable_default_ctor(__ht),
1826      _M_buckets(nullptr),
1827      _M_bucket_count(__ht._M_bucket_count),
1828      _M_element_count(__ht._M_element_count),
1829      _M_rehash_policy(__ht._M_rehash_policy)
1830    {
1831      if (__ht._M_node_allocator() == this->_M_node_allocator())
1832	{
1833	  if (__ht._M_uses_single_bucket())
1834	    {
1835	      _M_buckets = &_M_single_bucket;
1836	      _M_single_bucket = __ht._M_single_bucket;
1837	    }
1838	  else
1839	    _M_buckets = __ht._M_buckets;
1840
1841	  // Fix bucket containing the _M_before_begin pointer that can't be
1842	  // moved.
1843	  _M_update_bbegin(__ht._M_begin());
1844
1845	  __ht._M_reset();
1846	}
1847      else
1848	{
	  // Fall back to copying when the element move could throw,
	  // to preserve the strong guarantee.
1849	  using _Fwd_Ht = __conditional_t<
1850	    __move_if_noexcept_cond<value_type>::value,
1851	    const _Hashtable&, _Hashtable&&>;
1852	  _M_assign(std::forward<_Fwd_Ht>(__ht));
1853	  __ht.clear();
1854	}
1855    }
1856
  // Destructor: free all nodes then the bucket array.
1857  template<typename _Key, typename _Value, typename _Alloc,
1858	   typename _ExtractKey, typename _Equal,
1859	   typename _Hash, typename _RangeHash, typename _Unused,
1860	   typename _RehashPolicy, typename _Traits>
1861    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1862	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1863    ~_Hashtable() noexcept
1864    {
1865      // Getting a bucket index from a node shall not throw because it is used
1866      // during the rehash process. This static_assert purpose is limited to usage
1867      // of _Hashtable with _Hashtable_traits requesting non-cached hash code.
1868      // Need a complete type to check this, so do it in the destructor not at
1869      // class scope.
1870      static_assert(noexcept(declval<const __hash_code_base_access&>()
1871			     ._M_bucket_index(declval<const __node_value_type&>(),
1872					      (std::size_t)0)),
1873		    "Cache the hash code or qualify your functors involved"
1874		    " in hash code and bucket index computation with noexcept");
1875
1876      this->_M_deallocate_nodes(_M_begin());
1877      _M_deallocate_buckets();
1878    }
1879
  // Member swap.  Functors are swapped unconditionally; the allocator
  // only under POCS.  The in-object single bucket and the before-begin
  // node cannot be exchanged by pointer, hence the special cases.
1880  template<typename _Key, typename _Value, typename _Alloc,
1881	   typename _ExtractKey, typename _Equal,
1882	   typename _Hash, typename _RangeHash, typename _Unused,
1883	   typename _RehashPolicy, typename _Traits>
1884    void
1885    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1886	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1887    swap(_Hashtable& __x)
1888    noexcept(__and_<__is_nothrow_swappable<_Hash>,
1889		    __is_nothrow_swappable<_Equal>>::value)
1890    {
1891      using std::swap;
1892      swap(__hash_code_base::_M_hash._M_obj,
1893	   __x.__hash_code_base::_M_hash._M_obj);
1894      swap(__hashtable_base::_M_equal._M_obj,
1895	   __x.__hashtable_base::_M_equal._M_obj);
1896
1897#pragma GCC diagnostic push
1898#pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
1899      if constexpr (__node_alloc_traits::propagate_on_container_swap::value)
1900	swap(this->_M_node_allocator(), __x._M_node_allocator());
1901#pragma GCC diagnostic pop
1902
1903      std::swap(_M_rehash_policy, __x._M_rehash_policy);
1904
1905      // Deal properly with potentially moved instances.
1906      if (this->_M_uses_single_bucket())
1907	{
1908	  if (!__x._M_uses_single_bucket())
1909	    {
1910	      _M_buckets = __x._M_buckets;
1911	      __x._M_buckets = &__x._M_single_bucket;
1912	    }
1913	}
1914      else if (__x._M_uses_single_bucket())
1915	{
1916	  __x._M_buckets = _M_buckets;
1917	  _M_buckets = &_M_single_bucket;
1918	}
1919      else
1920	std::swap(_M_buckets, __x._M_buckets);
1921
1922      std::swap(_M_bucket_count, __x._M_bucket_count);
1923      std::swap(_M_before_begin._M_nxt, __x._M_before_begin._M_nxt);
1924      std::swap(_M_element_count, __x._M_element_count);
1925      std::swap(_M_single_bucket, __x._M_single_bucket);
1926
1927      // Fix buckets containing the _M_before_begin pointers that can't be
1928      // swapped.
1929      _M_update_bbegin();
1930      __x._M_update_bbegin();
1931    }
1932
1933 template<typename _Key, typename _Value, typename _Alloc,
1934 typename _ExtractKey, typename _Equal,
1935 typename _Hash, typename _RangeHash, typename _Unused,
1936 typename _RehashPolicy, typename _Traits>
1937 auto
1938 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1939 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1940 find(const key_type& __k)
1941 -> iterator
1942 { return iterator(_M_locate(__k)); }
1943
1944 template<typename _Key, typename _Value, typename _Alloc,
1945 typename _ExtractKey, typename _Equal,
1946 typename _Hash, typename _RangeHash, typename _Unused,
1947 typename _RehashPolicy, typename _Traits>
1948 auto
1949 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1950 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1951 find(const key_type& __k) const
1952 -> const_iterator
1953 { return const_iterator(_M_locate(__k)); }
1954
1955#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED
1956 template<typename _Key, typename _Value, typename _Alloc,
1957 typename _ExtractKey, typename _Equal,
1958 typename _Hash, typename _RangeHash, typename _Unused,
1959 typename _RehashPolicy, typename _Traits>
1960 template<typename _Kt, typename, typename>
1961 auto
1962 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1963 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1964 _M_find_tr(const _Kt& __k)
1965 -> iterator
1966 {
1967 if (size() <= __small_size_threshold())
1968 {
1969 for (auto __n = _M_begin(); __n; __n = __n->_M_next())
1970 if (this->_M_key_equals_tr(__k, *__n))
1971 return iterator(__n);
1972 return end();
1973 }
1974
1975 __hash_code __code = this->_M_hash_code_tr(__k);
1976 std::size_t __bkt = _M_bucket_index(__code);
1977 return iterator(_M_find_node_tr(__bkt, __k, __code));
1978 }
1979
1980 template<typename _Key, typename _Value, typename _Alloc,
1981 typename _ExtractKey, typename _Equal,
1982 typename _Hash, typename _RangeHash, typename _Unused,
1983 typename _RehashPolicy, typename _Traits>
1984 template<typename _Kt, typename, typename>
1985 auto
1986 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1987 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1988 _M_find_tr(const _Kt& __k) const
1989 -> const_iterator
1990 {
1991 if (size() <= __small_size_threshold())
1992 {
1993 for (auto __n = _M_begin(); __n; __n = __n->_M_next())
1994 if (this->_M_key_equals_tr(__k, *__n))
1995 return const_iterator(__n);
1996 return end();
1997 }
1998
1999 __hash_code __code = this->_M_hash_code_tr(__k);
2000 std::size_t __bkt = _M_bucket_index(__code);
2001 return const_iterator(_M_find_node_tr(__bkt, __k, __code));
2002 }
2003#endif // C++20 __glibcxx_generic_unordered_lookup
2004
2005 template<typename _Key, typename _Value, typename _Alloc,
2006 typename _ExtractKey, typename _Equal,
2007 typename _Hash, typename _RangeHash, typename _Unused,
2008 typename _RehashPolicy, typename _Traits>
2009 auto
2010 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2011 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2012 count(const key_type& __k) const
2013 -> size_type
2014 {
2015 auto __it = find(__k);
2016 if (!__it._M_cur)
2017 return 0;
2018
2019 if (__unique_keys::value)
2020 return 1;
2021
2022 size_type __result = 1;
2023 for (auto __ref = __it++;
2024 __it._M_cur && this->_M_node_equals(*__ref._M_cur, *__it._M_cur);
2025 ++__it)
2026 ++__result;
2027
2028 return __result;
2029 }
2030
2031#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_count_tr(const _Kt& __k) const
      -> size_type
      {
	// Heterogeneous count (C++20).  Small tables are scanned linearly
	// without hashing the key.
	if (size() <= __small_size_threshold())
	  {
	    size_type __result = 0;
	    for (auto __n = _M_begin(); __n; __n = __n->_M_next())
	      {
		if (this->_M_key_equals_tr(__k, *__n))
		  {
		    ++__result;
		    continue;
		  }

		// Equivalent elements are adjacent: once a non-matching
		// node follows at least one match, the run is over.
		if (__result)
		  break;
	      }

	    return __result;
	  }

	__hash_code __code = this->_M_hash_code_tr(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	auto __n = _M_find_node_tr(__bkt, __k, __code);
	if (!__n)
	  return 0;

	// Count the adjacent equivalent nodes following the first match.
	iterator __it(__n);
	size_type __result = 1;
	for (++__it;
	     __it._M_cur && this->_M_equals_tr(__k, __code, *__it._M_cur);
	     ++__it)
	  ++__result;

	return __result;
      }
2076#endif // C++20 __glibcxx_generic_unordered_lookup
2077
2078 template<typename _Key, typename _Value, typename _Alloc,
2079 typename _ExtractKey, typename _Equal,
2080 typename _Hash, typename _RangeHash, typename _Unused,
2081 typename _RehashPolicy, typename _Traits>
2082 auto
2083 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2084 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2085 equal_range(const key_type& __k)
2086 -> pair<iterator, iterator>
2087 {
2088 auto __ite = find(__k);
2089 if (!__ite._M_cur)
2090 return { __ite, __ite };
2091
2092 auto __beg = __ite++;
2093 if (__unique_keys::value)
2094 return { __beg, __ite };
2095
2096 while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
2097 ++__ite;
2098
2099 return { __beg, __ite };
2100 }
2101
2102 template<typename _Key, typename _Value, typename _Alloc,
2103 typename _ExtractKey, typename _Equal,
2104 typename _Hash, typename _RangeHash, typename _Unused,
2105 typename _RehashPolicy, typename _Traits>
2106 auto
2107 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2108 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2109 equal_range(const key_type& __k) const
2110 -> pair<const_iterator, const_iterator>
2111 {
2112 auto __ite = find(__k);
2113 if (!__ite._M_cur)
2114 return { __ite, __ite };
2115
2116 auto __beg = __ite++;
2117 if (__unique_keys::value)
2118 return { __beg, __ite };
2119
2120 while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
2121 ++__ite;
2122
2123 return { __beg, __ite };
2124 }
2125
2126#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_equal_range_tr(const _Kt& __k)
      -> pair<iterator, iterator>
      {
	// Heterogeneous equal_range (C++20).  Small tables are scanned
	// linearly without hashing the key.
	if (size() <= __small_size_threshold())
	  {
	    __node_ptr __n, __beg = nullptr;
	    for (__n = _M_begin(); __n; __n = __n->_M_next())
	      {
		if (this->_M_key_equals_tr(__k, *__n))
		  {
		    if (!__beg)
		      __beg = __n; // First node of the matching run.
		    continue;
		  }

		// Equivalent elements are adjacent: a mismatch after a
		// started run terminates the range at __n.
		if (__beg)
		  break;
	      }

	    return { iterator(__beg), iterator(__n) };
	  }

	__hash_code __code = this->_M_hash_code_tr(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	auto __n = _M_find_node_tr(__bkt, __k, __code);
	iterator __ite(__n);
	if (!__n)
	  return { __ite, __ite };

	// Extend past every adjacent equivalent element.
	auto __beg = __ite++;
	while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
	  ++__ite;

	return { __beg, __ite };
      }
2170
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_equal_range_tr(const _Kt& __k) const
      -> pair<const_iterator, const_iterator>
      {
	// Const heterogeneous equal_range (C++20); mirrors the non-const
	// overload.  Small tables are scanned linearly without hashing.
	if (size() <= __small_size_threshold())
	  {
	    __node_ptr __n, __beg = nullptr;
	    for (__n = _M_begin(); __n; __n = __n->_M_next())
	      {
		if (this->_M_key_equals_tr(__k, *__n))
		  {
		    if (!__beg)
		      __beg = __n; // First node of the matching run.
		    continue;
		  }

		// A mismatch after a started run terminates the range.
		if (__beg)
		  break;
	      }

	    return { const_iterator(__beg), const_iterator(__n) };
	  }

	__hash_code __code = this->_M_hash_code_tr(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	auto __n = _M_find_node_tr(__bkt, __k, __code);
	const_iterator __ite(__n);
	if (!__n)
	  return { __ite, __ite };

	// Extend past every adjacent equivalent element.
	auto __beg = __ite++;
	while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
	  ++__ite;

	return { __beg, __ite };
      }
2214#endif // C++20 __glibcxx_generic_unordered_lookup
2215
  // Find the node before the one whose key compares equal to k in the bucket
  // bkt. Return nullptr if no node is found.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_find_before_node_tr(size_type __bkt, const _Kt& __k,
			     __hash_code __code) const
      -> __node_base_ptr
      {
	// _M_buckets[__bkt] is the node *before* the bucket's first node
	// (possibly &_M_before_begin); null means the bucket is empty.
	__node_base_ptr __prev_p = _M_buckets[__bkt];
	if (!__prev_p)
	  return nullptr;

	for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
	     __p = __p->_M_next())
	  {
	    if (this->_M_equals_tr(__k, __code, *__p))
	      return __prev_p;

	    // Stop at the end of the list or when the next node belongs to
	    // another bucket (expected-unlikely: most probes match early).
	    if (__builtin_expect (
		  !__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt, 0))
	      break;
	    __prev_p = __p;
	  }

	return nullptr;
      }
2248
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template <typename _Kt>
      inline auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_locate_tr(const _Kt& __k) const
      -> __location_type
      {
	// Locate __k, returning its predecessor node in _M_before and,
	// when they were computed, the hash code and bucket index.
	__location_type __loc;
	const auto __size = size();

	// Small tables: scan the whole element list, comparing keys
	// directly without hashing.
	if (__size <= __small_size_threshold())
	  {
	    // Walk via the predecessor so that on success _M_before is the
	    // node before the match.
	    __loc._M_before = pointer_traits<__node_base_ptr>::
	      pointer_to(const_cast<__node_base&>(_M_before_begin));
	    while (__loc._M_before->_M_nxt)
	      {
		if (this->_M_key_equals_tr(__k, *__loc._M_node()))
		  // Found without hashing; _M_bucket_index keeps its
		  // default value (callers treat size_type(-1) as unset).
		  return __loc;
		__loc._M_before = __loc._M_before->_M_nxt;
	      }
	    __loc._M_before = nullptr; // Didn't find it.
	  }

	// Compute hash code and bucket even after a failed small-table
	// scan so callers can reuse them for a subsequent insertion.
	__loc._M_hash_code = this->_M_hash_code_tr(__k);
	__loc._M_bucket_index = _M_bucket_index(__loc._M_hash_code);

	if (__size > __small_size_threshold())
	  __loc._M_before = _M_find_before_node_tr(
	    __loc._M_bucket_index, __k, __loc._M_hash_code);

	return __loc;
      }
2285
2286 template<typename _Key, typename _Value, typename _Alloc,
2287 typename _ExtractKey, typename _Equal,
2288 typename _Hash, typename _RangeHash, typename _Unused,
2289 typename _RehashPolicy, typename _Traits>
2290 auto
2291 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2292 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2293 _M_get_previous_node(size_type __bkt, __node_ptr __n)
2294 -> __node_base_ptr
2295 {
2296 __node_base_ptr __prev_n = _M_buckets[__bkt];
2297 while (__prev_n->_M_nxt != __n)
2298 __prev_n = __prev_n->_M_nxt;
2299 return __prev_n;
2300 }
2301
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename... _Args>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_emplace_uniq(_Args&&... __args)
      -> pair<iterator, bool>
      {
	// Emplace for unique-key containers.  Try to extract the key from
	// the arguments first so that, when an equivalent key is already
	// present, no node is allocated and no element constructed.
	const key_type* __kp = nullptr;

	if constexpr (sizeof...(_Args) == 1)
	  {
	    if constexpr (__is_key_type<_Args...>)
	      {
		// A single argument usable as the key: use it in place.
		const auto& __key = _ExtractKey{}(__args...);
		__kp = std::__addressof(__key);
	      }
	  }
	else if constexpr (sizeof...(_Args) == 2)
	  {
	    if constexpr (__is_key_type<pair<const _Args&...>>)
	      {
		// Two args as for a map's pair<key, mapped>: bind them into
		// a pair of references to reach the key without building
		// the value_type.
		pair<const _Args&...> __refs(__args...);
		const auto& __key = _ExtractKey{}(__refs);
		__kp = std::__addressof(__key);
	      }
	  }

	_Scoped_node __node { __node_ptr(), this }; // Do not create node yet.
	__hash_code __code = 0;
	size_type __bkt = 0;

	if (__kp == nullptr)
	  {
	    // Didn't extract a key from the args, so build the node.
	    __node._M_node
	      = this->_M_allocate_node(std::forward<_Args>(__args)...);
	    const key_type& __key = _ExtractKey{}(__node._M_node->_M_v());
	    __kp = std::__addressof(__key);
	  }

	if (auto __loc = _M_locate(*__kp))
	  // There is already an equivalent node, no insertion.
	  return { iterator(__loc), false };
	else
	  {
	    // Reuse the hash code and bucket computed by _M_locate.
	    __code = __loc._M_hash_code;
	    __bkt = __loc._M_bucket_index;
	  }

	if (!__node._M_node)
	  __node._M_node
	    = this->_M_allocate_node(std::forward<_Args>(__args)...);

	// Insert the node; on success _Scoped_node must release ownership.
	auto __pos = _M_insert_unique_node(__bkt, __code, __node._M_node);
	__node._M_node = nullptr;
	return { __pos, true };
      }
#pragma GCC diagnostic pop
2367
2368 template<typename _Key, typename _Value, typename _Alloc,
2369 typename _ExtractKey, typename _Equal,
2370 typename _Hash, typename _RangeHash, typename _Unused,
2371 typename _RehashPolicy, typename _Traits>
2372 template<typename... _Args>
2373 auto
2374 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2375 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2376 _M_emplace_multi(const_iterator __hint, _Args&&... __args)
2377 -> iterator
2378 {
2379 // First build the node to get its hash code.
2380 _Scoped_node __node { this, std::forward<_Args>(__args)... };
2381 const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
2382
2383 auto __res = this->_M_compute_hash_code(__hint._M_cur, __k);
2384 auto __pos
2385 = _M_insert_multi_node(__res.first, __res.second, __node._M_node);
2386 __node._M_node = nullptr;
2387 return __pos;
2388 }
2389
2390 template<typename _Key, typename _Value, typename _Alloc,
2391 typename _ExtractKey, typename _Equal,
2392 typename _Hash, typename _RangeHash, typename _Unused,
2393 typename _RehashPolicy, typename _Traits>
2394 void
2395 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2396 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2397 _M_rehash_insert(size_type __n)
2398 {
2399 using __pair_type = std::pair<bool, std::size_t>;
2400 if (__n == 0)
2401 return;
2402
2403 __rehash_guard_t __rehash_guard(_M_rehash_policy);
2404 __pair_type __do_rehash
2405 = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, __n);
2406
2407 if (__do_rehash.first)
2408 _M_rehash(__do_rehash.second, false_type{});
2409
2410 __rehash_guard._M_guarded_obj = nullptr;
2411 }
2412
2413
2414 template<typename _Key, typename _Value, typename _Alloc,
2415 typename _ExtractKey, typename _Equal,
2416 typename _Hash, typename _RangeHash, typename _Unused,
2417 typename _RehashPolicy, typename _Traits>
2418 template<typename _InputIterator>
2419 void
2420 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2421 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2422 _M_insert_range_multi(_InputIterator __first, _InputIterator __last)
2423 {
2424 _M_rehash_insert(__detail::__distance_fw(__first, __last));
2425 for (; __first != __last; ++__first)
2426 _M_emplace_multi(cend(), *__first);
2427 }
2428
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_compute_hash_code(__node_ptr __hint, const key_type& __k) const
    -> pair<__node_ptr, __hash_code>
    {
      // Return the hash code for __k together with (possibly) a node
      // holding an equivalent key to use as insertion hint.  For small
      // tables, try to recover the code from an equivalent element instead
      // of hashing __k.
      if (size() <= __small_size_threshold())
	{
	  if (__hint)
	    {
	      // First search from the hint to the end of the list.
	      for (auto __it = __hint; __it; __it = __it->_M_next())
		if (this->_M_key_equals(__k, *__it))
		  return { __it, this->_M_hash_code(*__it) };
	    }

	  // Then from the beginning of the list up to the hint.
	  for (auto __it = _M_begin(); __it != __hint; __it = __it->_M_next())
	    if (this->_M_key_equals(__k, *__it))
	      return { __it, this->_M_hash_code(*__it) };

	  // No equivalent element: the hint is of no use, drop it.
	  __hint = nullptr;
	}

      return { __hint, this->_M_hash_code(__k) };
    }
2457
2458 template<typename _Key, typename _Value, typename _Alloc,
2459 typename _ExtractKey, typename _Equal,
2460 typename _Hash, typename _RangeHash, typename _Unused,
2461 typename _RehashPolicy, typename _Traits>
2462 auto
2463 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2464 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2465 _M_insert_unique_node(size_type __bkt, __hash_code __code,
2466 __node_ptr __node, size_type __n_elt)
2467 -> iterator
2468 {
2469 __rehash_guard_t __rehash_guard(_M_rehash_policy);
2471 = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count,
2472 __n_elt);
2473
2474 if (__do_rehash.first)
2475 {
2476 _M_rehash(__do_rehash.second, true_type{});
2477 __bkt = _M_bucket_index(__code);
2478 }
2479
2480 __rehash_guard._M_guarded_obj = nullptr;
2481 _M_store_code(*__node, __code);
2482
2483 // Always insert at the beginning of the bucket.
2484 _M_insert_bucket_begin(__bkt, __node);
2485 ++_M_element_count;
2486 return iterator(__node);
2487 }
2488
2489 template<typename _Key, typename _Value, typename _Alloc,
2490 typename _ExtractKey, typename _Equal,
2491 typename _Hash, typename _RangeHash, typename _Unused,
2492 typename _RehashPolicy, typename _Traits>
2493 auto
2494 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2495 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2496 _M_insert_multi_node(__node_ptr __hint,
2497 __hash_code __code, __node_ptr __node)
2498 -> iterator
2499 {
2500 __rehash_guard_t __rehash_guard(_M_rehash_policy);
2502 = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1);
2503
2504 if (__do_rehash.first)
2505 _M_rehash(__do_rehash.second, false_type{});
2506
2507 __rehash_guard._M_guarded_obj = nullptr;
2508 _M_store_code(*__node, __code);
2509 const key_type& __k = _ExtractKey{}(__node->_M_v());
2510 size_type __bkt = _M_bucket_index(__code);
2511
2512 // Find the node before an equivalent one or use hint if it exists and
2513 // if it is equivalent.
2514 __node_base_ptr __prev
2515 = __builtin_expect(__hint != nullptr, false)
2516 && this->_M_equals(__k, __code, *__hint)
2517 ? __hint
2518 : _M_find_before_node(__bkt, __k, __code);
2519
2520 if (__prev)
2521 {
2522 // Insert after the node before the equivalent one.
2523 __node->_M_nxt = __prev->_M_nxt;
2524 __prev->_M_nxt = __node;
2525 if (__builtin_expect(__prev == __hint, false))
2526 // hint might be the last bucket node, in this case we need to
2527 // update next bucket.
2528 if (__node->_M_nxt
2529 && !this->_M_equals(__k, __code, *__node->_M_next()))
2530 {
2531 size_type __next_bkt = _M_bucket_index(*__node->_M_next());
2532 if (__next_bkt != __bkt)
2533 _M_buckets[__next_bkt] = __node;
2534 }
2535 }
2536 else
2537 // The inserted node has no equivalent in the hashtable. We must
2538 // insert the new node at the beginning of the bucket to preserve
2539 // equivalent elements' relative positions.
2540 _M_insert_bucket_begin(__bkt, __node);
2541 ++_M_element_count;
2542 return iterator(__node);
2543 }
2544
2545 template<typename _Key, typename _Value, typename _Alloc,
2546 typename _ExtractKey, typename _Equal,
2547 typename _Hash, typename _RangeHash, typename _Unused,
2548 typename _RehashPolicy, typename _Traits>
2549 auto
2550 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2551 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2552 erase(const_iterator __it)
2553 -> iterator
2554 {
2555 __node_ptr __n = __it._M_cur;
2556 std::size_t __bkt = _M_bucket_index(*__n);
2557
2558 // Look for previous node to unlink it from the erased one, this
2559 // is why we need buckets to contain the before begin to make
2560 // this search fast.
2561 __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
2562 return _M_erase(__bkt, __prev_n, __n);
2563 }
2564
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
    -> iterator
    {
      // Unlink __n (whose predecessor is __prev_n) from bucket __bkt,
      // keeping the bucket-to-before-begin-node invariants, then destroy it.
      if (__prev_n == _M_buckets[__bkt])
	// __n is the first node of its bucket.
	_M_remove_bucket_begin(__bkt, __n->_M_next(),
	   __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
      else if (__n->_M_nxt)
	{
	  // If __n was the last node of its bucket, the following bucket
	  // must now use __prev_n as its before-begin node.
	  size_type __next_bkt = _M_bucket_index(*__n->_M_next());
	  if (__next_bkt != __bkt)
	    _M_buckets[__next_bkt] = __prev_n;
	}

      __prev_n->_M_nxt = __n->_M_nxt;
      iterator __result(__n->_M_next());
      this->_M_deallocate_node(__n);
      --_M_element_count;

      return __result;
    }
2592
2593#pragma GCC diagnostic push
2594#pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
2595
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    erase(const key_type& __k)
    -> size_type
    {
      // Erase all elements with key __k; returns the number erased.
      auto __loc = _M_locate(__k);
      if (!__loc)
	return 0;

      __node_base_ptr __prev_n = __loc._M_before;
      __node_ptr __n = __loc._M_node();
      auto __bkt = __loc._M_bucket_index;
      // size_type(-1) indicates the small-table scan found the node without
      // hashing; compute the bucket now.
      if (__bkt == size_type(-1))
	__bkt = _M_bucket_index(*__n);
      if constexpr (__unique_keys::value)
	{
	  _M_erase(__bkt, __prev_n, __n);
	  return 1;
	}
      else
	// Multi container: erase the whole run of equivalent nodes.
	return _M_erase_some(__bkt, __prev_n, __n);
    }
2623
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_erase_some(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
    -> size_type
    {
      // Erase the run of nodes equivalent to *__n, which starts at __n and
      // is preceded by __prev_n; returns the number of nodes erased.
      //
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 526. Is it undefined if a function in the standard changes
      // in parameters?
      // We use one loop to find all matching nodes and another to
      // deallocate them so that the key stays valid during the first loop.
      // It might be invalidated indirectly when destroying nodes.
      __node_ptr __n_last = __n->_M_next();
      while (__n_last && this->_M_node_equals(*__n, *__n_last))
	__n_last = __n_last->_M_next();

      // Bucket of the first node past the run, used to fix up bucket
      // before-begin pointers after unlinking.
      std::size_t __n_last_bkt
	= __n_last ? _M_bucket_index(*__n_last) : __bkt;

      // Deallocate nodes.
      size_type __result = 0;
      do
	{
	  __node_ptr __p = __n->_M_next();
	  this->_M_deallocate_node(__n);
	  __n = __p;
	  ++__result;
	}
      while (__n != __n_last);

      _M_element_count -= __result;
      if (__prev_n == _M_buckets[__bkt])
	// The run started at the beginning of its bucket.
	_M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
      else if (__n_last_bkt != __bkt)
	// The run ended its bucket: the next bucket's before-begin node
	// becomes __prev_n.
	_M_buckets[__n_last_bkt] = __prev_n;
      __prev_n->_M_nxt = __n_last;
      return __result;
    }
2666
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template <typename _Kt>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_erase_tr(const _Kt& __k)
      -> size_type
      {
	// Heterogeneous erase; mirrors erase(const key_type&) but locates
	// the node with a transparent comparison.
	auto __loc = _M_locate_tr(__k);
	if (!__loc)
	  return 0;

	__node_base_ptr __prev_n = __loc._M_before;
	__node_ptr __n = __loc._M_node();
	auto __bkt = __loc._M_bucket_index;
	// size_type(-1) indicates the small-table scan found the node
	// without hashing; compute the bucket now.
	if (__bkt == size_type(-1))
	  __bkt = _M_bucket_index(*__n);
	if constexpr (__unique_keys::value)
	  {
	    _M_erase(__bkt, __prev_n, __n);
	    return 1;
	  }
	else
	  // Multi container: erase the whole run of equivalent nodes.
	  return _M_erase_some(__bkt, __prev_n, __n);
      }
2695
2696#pragma GCC diagnostic pop
2697
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    erase(const_iterator __first, const_iterator __last)
    -> iterator
    {
      // Erase [__first, __last), walking the range bucket by bucket so the
      // bucket before-begin pointers can be maintained as we go.
      __node_ptr __n = __first._M_cur;
      __node_ptr __last_n = __last._M_cur;
      if (__n == __last_n)
	return iterator(__n);

      std::size_t __bkt = _M_bucket_index(*__n);

      __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
      bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
      std::size_t __n_bkt = __bkt;
      for (;;)
	{
	  // Deallocate the consecutive nodes belonging to bucket __bkt.
	  do
	    {
	      __node_ptr __tmp = __n;
	      __n = __n->_M_next();
	      this->_M_deallocate_node(__tmp);
	      --_M_element_count;
	      if (!__n)
		break;
	      __n_bkt = _M_bucket_index(*__n);
	    }
	  while (__n != __last_n && __n_bkt == __bkt);
	  if (__is_bucket_begin)
	    _M_remove_bucket_begin(__bkt, __n, __n_bkt);
	  if (__n == __last_n)
	    break;
	  // Every subsequent node necessarily starts its bucket.
	  __is_bucket_begin = true;
	  __bkt = __n_bkt;
	}

      // The node following the erased range, if any, may need its bucket
      // to adopt __prev_n as before-begin node.
      if (__n && (__n_bkt != __bkt || __is_bucket_begin))
	_M_buckets[__n_bkt] = __prev_n;
      __prev_n->_M_nxt = __n;
      return iterator(__n);
    }
2744
2745 template<typename _Key, typename _Value, typename _Alloc,
2746 typename _ExtractKey, typename _Equal,
2747 typename _Hash, typename _RangeHash, typename _Unused,
2748 typename _RehashPolicy, typename _Traits>
2749 void
2750 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2751 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2752 clear() noexcept
2753 {
2754 this->_M_deallocate_nodes(_M_begin());
2755 std::fill_n(_M_buckets, _M_bucket_count, nullptr);
2756 _M_element_count = 0;
2757 _M_before_begin._M_nxt = nullptr;
2758 }
2759
2760 template<typename _Key, typename _Value, typename _Alloc,
2761 typename _ExtractKey, typename _Equal,
2762 typename _Hash, typename _RangeHash, typename _Unused,
2763 typename _RehashPolicy, typename _Traits>
2764 void
2765 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2766 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2767 rehash(size_type __bkt_count)
2768 {
2769 __rehash_guard_t __rehash_guard(_M_rehash_policy);
2770 __bkt_count
2771 = std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
2772 __bkt_count);
2773 __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);
2774
2775 if (__bkt_count != _M_bucket_count)
2776 {
2777 _M_rehash(__bkt_count, __unique_keys{});
2778 __rehash_guard._M_guarded_obj = nullptr;
2779 }
2780 }
2781
  // Rehash when there are no equivalent elements (unique keys).
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_rehash(size_type __bkt_count, true_type /* __uks */)
    {
      __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
      __node_ptr __p = _M_begin();
      // Rebuild the singly-linked element list from scratch while
      // distributing the nodes over the new buckets.
      _M_before_begin._M_nxt = nullptr;
      std::size_t __bbegin_bkt = 0;	// Bucket of the current list head.
      while (__p)
	{
	  __node_ptr __next = __p->_M_next();
	  std::size_t __bkt
	    = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
	  if (!__new_buckets[__bkt])
	    {
	      // First node of its bucket: push it at the head of the list
	      // and make the bucket point at _M_before_begin.
	      __p->_M_nxt = _M_before_begin._M_nxt;
	      _M_before_begin._M_nxt = __p;
	      __new_buckets[__bkt] = &_M_before_begin;
	      // The previous head's bucket now starts after __p.
	      if (__p->_M_nxt)
		__new_buckets[__bbegin_bkt] = __p;
	      __bbegin_bkt = __bkt;
	    }
	  else
	    {
	      // Bucket already populated: insert right after its
	      // before-begin node.
	      __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
	      __new_buckets[__bkt]->_M_nxt = __p;
	    }

	  __p = __next;
	}

      _M_deallocate_buckets();
      _M_bucket_count = __bkt_count;
      _M_buckets = __new_buckets;
    }
2823
  // Rehash when there can be equivalent elements, preserving their relative
  // order.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_rehash(size_type __bkt_count, false_type /* __uks */)
    {
      __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
      __node_ptr __p = _M_begin();
      _M_before_begin._M_nxt = nullptr;
      std::size_t __bbegin_bkt = 0;	// Bucket of the current list head.
      std::size_t __prev_bkt = 0;	// Bucket of previously moved node.
      __node_ptr __prev_p = nullptr;	// Previously moved node.
      bool __check_bucket = false;

      while (__p)
	{
	  __node_ptr __next = __p->_M_next();
	  std::size_t __bkt
	    = __hash_code_base::_M_bucket_index(*__p, __bkt_count);

	  if (__prev_p && __prev_bkt == __bkt)
	    {
	      // Previous insert was already in this bucket, we insert after
	      // the previously inserted one to preserve equivalent elements
	      // relative order.
	      __p->_M_nxt = __prev_p->_M_nxt;
	      __prev_p->_M_nxt = __p;

	      // Inserting after a node in a bucket requires checking that
	      // we haven't changed the bucket's last node, in which case
	      // the next bucket's before-begin node must be updated.  We
	      // schedule a check as soon as we move out of the sequence of
	      // equivalent nodes to limit the number of checks.
	      __check_bucket = true;
	    }
	  else
	    {
	      if (__check_bucket)
		{
		  // Check if we shall update the next bucket because of
		  // insertions into __prev_bkt bucket.
		  if (__prev_p->_M_nxt)
		    {
		      std::size_t __next_bkt
			= __hash_code_base::_M_bucket_index(
			  *__prev_p->_M_next(), __bkt_count);
		      if (__next_bkt != __prev_bkt)
			__new_buckets[__next_bkt] = __prev_p;
		    }
		  __check_bucket = false;
		}

	      if (!__new_buckets[__bkt])
		{
		  // First node of its bucket: push it at the head of the
		  // list and make the bucket point at _M_before_begin.
		  __p->_M_nxt = _M_before_begin._M_nxt;
		  _M_before_begin._M_nxt = __p;
		  __new_buckets[__bkt] = &_M_before_begin;
		  // The previous head's bucket now starts after __p.
		  if (__p->_M_nxt)
		    __new_buckets[__bbegin_bkt] = __p;
		  __bbegin_bkt = __bkt;
		}
	      else
		{
		  // Bucket already populated: insert right after its
		  // before-begin node.
		  __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
		  __new_buckets[__bkt]->_M_nxt = __p;
		}
	    }
	  __prev_p = __p;
	  __prev_bkt = __bkt;
	  __p = __next;
	}

      // Perform any check still pending when the loop ended.
      if (__check_bucket && __prev_p->_M_nxt)
	{
	  std::size_t __next_bkt
	    = __hash_code_base::_M_bucket_index(*__prev_p->_M_next(),
						__bkt_count);
	  if (__next_bkt != __prev_bkt)
	    __new_buckets[__next_bkt] = __prev_p;
	}

      _M_deallocate_buckets();
      _M_bucket_count = __bkt_count;
      _M_buckets = __new_buckets;
    }
2914
2915#pragma GCC diagnostic push
2916#pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
2917
  // This is for implementing equality comparison for unordered containers,
  // per N3068, by John Lakos and Pablo Halpern.
  // Algorithmically, we follow closely the reference implementations therein.
  //
  // Returns true iff *this and __other hold equal contents: for unique keys,
  // every element of *this has an equal element in __other; for non-unique
  // keys, each run of equivalent elements in *this must be a permutation of
  // the corresponding run in __other.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    bool
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_equal(const _Hashtable& __other) const
    {
      // Cheap reject: equal containers have equal sizes.
      if (size() != __other.size())
	return false;

      if constexpr (__unique_keys::value)
	// Unique keys: for each element of *this, scan the bucket its key
	// occupies in __other for a node whose whole value compares equal.
	for (auto __x_n = _M_begin(); __x_n; __x_n = __x_n->_M_next())
	  {
	    std::size_t __ybkt = __other._M_bucket_index_ext(*__x_n);
	    auto __prev_n = __other._M_buckets[__ybkt];
	    if (!__prev_n)
	      return false;

	    // __prev_n is the bucket's before-begin node; walk the chain
	    // until a match or until we leave the bucket.
	    for (__node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);;
		 __n = __n->_M_next())
	      {
		if (__n->_M_v() == __x_n->_M_v())
		  break;

		if (!__n->_M_nxt
		    || __other._M_bucket_index(*__n->_M_next()) != __ybkt)
		  return false;
	      }
	  }
      else // non-unique keys
	for (auto __x_n = _M_begin(); __x_n;)
	  {
	    // Count the run [__x_n, __x_n_end) of elements equivalent to
	    // __x_n (equivalent elements are adjacent in the node list).
	    std::size_t __x_count = 1;
	    auto __x_n_end = __x_n->_M_next();
	    for (; __x_n_end
		   && key_eq()(_ExtractKey{}(__x_n->_M_v()),
			       _ExtractKey{}(__x_n_end->_M_v()));
		 __x_n_end = __x_n_end->_M_next())
	      ++__x_count;

	    std::size_t __ybkt = __other._M_bucket_index_ext(*__x_n);
	    auto __y_prev_n = __other._M_buckets[__ybkt];
	    if (!__y_prev_n)
	      return false;

	    // Find the first node in __other's bucket whose key is
	    // equivalent to __x_n's, skipping over other runs.
	    __node_ptr __y_n = static_cast<__node_ptr>(__y_prev_n->_M_nxt);
	    for (;;)
	      {
		if (key_eq()(_ExtractKey{}(__y_n->_M_v()),
			     _ExtractKey{}(__x_n->_M_v())))
		  break;

		// Skip the rest of the current run of nodes equivalent to
		// __y_ref_n.
		auto __y_ref_n = __y_n;
		for (__y_n = __y_n->_M_next(); __y_n; __y_n = __y_n->_M_next())
		  if (!__other._M_node_equals(*__y_ref_n, *__y_n))
		    break;

		if (!__y_n || __other._M_bucket_index(*__y_n) != __ybkt)
		  return false;
	      }

	    // Verify at least __x_count nodes follow from __y_n; otherwise
	    // __other cannot contain a matching run.
	    auto __y_n_end = __y_n;
	    for (; __y_n_end; __y_n_end = __y_n_end->_M_next())
	      if (--__x_count == 0)
		break;

	    if (__x_count != 0)
	      return false;

	    // The runs must be permutations of each other (order within a
	    // run is unspecified for equality purposes).
	    const_iterator __itx(__x_n), __itx_end(__x_n_end);
	    const_iterator __ity(__y_n);
	    if (!std::is_permutation(__itx, __itx_end, __ity))
	      return false;

	    // Advance to the next run.
	    __x_n = __x_n_end;
	  }

      return true;
    }
3002#pragma GCC diagnostic pop
3003
3004#ifdef __glibcxx_node_extract // >= C++17 && HOSTED
  // Primary template of the merge helper for the node-extract API.
  // Intentionally empty.  NOTE(review): presumably specialized by the
  // unordered containers to enable merging nodes between compatible
  // container types -- confirm against the specializations in the
  // unordered_map/unordered_set headers.
  template<typename, typename, typename> class _Hash_merge_helper { };
3006#endif // C++17
3007
3008#if __cpp_deduction_guides >= 201606
  // Used to constrain deduction guides: well-formed only when _Hash is
  // neither an integral type nor an allocator, so that an argument that is
  // an integer (presumably a bucket count) or an allocator is not deduced
  // as the hash functor.
  template<typename _Hash>
    using _RequireNotAllocatorOrIntegral
      = __enable_if_t<!__or_<is_integral<_Hash>, __is_allocator<_Hash>>::value>;
3013#endif
3014
3015#ifdef __glibcxx_associative_heterogeneous_erasure // C++ >= 23
// Satisfied when _Kt is usable as a heterogeneous lookup key for the
// unordered container _Container: both the hasher and the key-equality
// predicate must be transparent, and _Kt must satisfy __heterogeneous_key
// for _Container.
template <typename _Kt, typename _Container>
  concept __heterogeneous_hash_key =
    __transparent_comparator<typename _Container::hasher> &&
    __transparent_comparator<typename _Container::key_equal> &&
    __heterogeneous_key<_Kt, _Container>;
3021#endif
3022
3023/// @endcond
3024_GLIBCXX_END_NAMESPACE_VERSION
3025} // namespace std
3026
3027#pragma GCC diagnostic pop
3028
3029#endif // _HASHTABLE_H
__bool_constant< true > true_type
The type used as a compile-time boolean with true value.
Definition type_traits:119
pair(_T1, _T2) -> pair< _T1, _T2 >
Two pairs are equal iff their members are equal.
auto declval() noexcept -> decltype(__declval< _Tp >(0))
Definition type_traits:2708
constexpr tuple< _Elements &&... > forward_as_tuple(_Elements &&... __args) noexcept
Create a tuple of lvalue or rvalue references to the arguments.
Definition tuple:2735
constexpr std::remove_reference< _Tp >::type && move(_Tp &&__t) noexcept
Convert a value to an rvalue.
Definition move.h:138
constexpr piecewise_construct_t piecewise_construct
Tag for piecewise construction of std::pair objects.
Definition stl_pair.h:82
constexpr _Tp * __addressof(_Tp &__r) noexcept
Same as C++11 std::addressof.
Definition move.h:52
constexpr _Tp && forward(typename std::remove_reference< _Tp >::type &__t) noexcept
Forward an lvalue.
Definition move.h:72
constexpr const _Tp & max(const _Tp &, const _Tp &)
This does what you think it does.
ISO C++ entities toplevel namespace is std.
constexpr iterator_traits< _InputIterator >::difference_type distance(_InputIterator __first, _InputIterator __last)
A generalization of pointer arithmetic.
is_default_constructible
Definition type_traits:1241
Struct holding two objects of arbitrary type.
Definition stl_pair.h:304
_T1 first
The first member.
Definition stl_pair.h:308
_T2 second
The second member.
Definition stl_pair.h:309