libstdc++ — source listing of bits/hashtable.h
(extracted from a generated documentation page; line-number artifacts from the
original rendering remain embedded in the text below).
1// hashtable.h header -*- C++ -*-
2
3// Copyright (C) 2007-2025 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/hashtable.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{unordered_map, unordered_set}
28 */
29
30#ifndef _HASHTABLE_H
31#define _HASHTABLE_H 1
32
33#ifdef _GLIBCXX_SYSHDR
34#pragma GCC system_header
35#endif
36
#include <bits/hashtable_policy.h>
#include <bits/enable_special_members.h>
#include <bits/stl_algobase.h> // fill_n, is_permutation
#include <bits/stl_function.h> // __has_is_transparent_t
#ifdef __glibcxx_node_extract // >= C++17 && HOSTED
# include <bits/node_handle.h>
#endif
44
45#pragma GCC diagnostic push
46#pragma GCC diagnostic ignored "-Wc++11-extensions"
47
48namespace std _GLIBCXX_VISIBILITY(default)
49{
50_GLIBCXX_BEGIN_NAMESPACE_VERSION
51/// @cond undocumented
52
53 template<typename _Tp, typename _Hash>
54 using __cache_default
55 = __not_<__and_<// Do not cache for fast hasher.
57 // Mandatory for the rehash process.
58 __is_nothrow_invocable<const _Hash&, const _Tp&>>>;
59
60 // Helper to conditionally delete the default constructor.
61 // The _Hash_node_base type is used to distinguish this specialization
62 // from any other potentially-overlapping subobjects of the hashtable.
63 template<typename _Equal, typename _Hash, typename _Allocator>
64 using _Hashtable_enable_default_ctor
65 = _Enable_default_constructor<__and_<is_default_constructible<_Equal>,
68 __detail::_Hash_node_base>;
69
70 /**
71 * Primary class template _Hashtable.
72 *
73 * @ingroup hashtable-detail
74 *
75 * @tparam _Value CopyConstructible type.
76 *
77 * @tparam _Key CopyConstructible type.
78 *
79 * @tparam _Alloc An allocator type
80 * ([lib.allocator.requirements]) whose _Alloc::value_type is
81 * _Value. As a conforming extension, we allow for
82 * _Alloc::value_type != _Value.
83 *
84 * @tparam _ExtractKey Function object that takes an object of type
85 * _Value and returns a value of type _Key.
86 *
87 * @tparam _Equal Function object that takes two objects of type k
88 * and returns a bool-like value that is true if the two objects
89 * are considered equal.
90 *
91 * @tparam _Hash The hash function. A unary function object with
92 * argument type _Key and result type size_t. Return values should
93 * be distributed over the entire range [0, numeric_limits<size_t>:::max()].
94 *
95 * @tparam _RangeHash The range-hashing function (in the terminology of
96 * Tavori and Dreizin). A binary function object whose argument
97 * types and result type are all size_t. Given arguments r and N,
98 * the return value is in the range [0, N).
99 *
100 * @tparam _Unused Not used.
101 *
102 * @tparam _RehashPolicy Policy class with three members, all of
103 * which govern the bucket count. _M_next_bkt(n) returns a bucket
104 * count no smaller than n. _M_bkt_for_elements(n) returns a
105 * bucket count appropriate for an element count of n.
106 * _M_need_rehash(n_bkt, n_elt, n_ins) determines whether, if the
107 * current bucket count is n_bkt and the current element count is
108 * n_elt, we need to increase the bucket count for n_ins insertions.
109 * If so, returns make_pair(true, n), where n is the new bucket count. If
110 * not, returns make_pair(false, <anything>)
111 *
112 * @tparam _Traits Compile-time class with three boolean
113 * std::integral_constant members: __cache_hash_code, __constant_iterators,
114 * __unique_keys.
115 *
116 * Each _Hashtable data structure has:
117 *
118 * - _Bucket[] _M_buckets
119 * - _Hash_node_base _M_before_begin
120 * - size_type _M_bucket_count
121 * - size_type _M_element_count
122 *
123 * with _Bucket being _Hash_node_base* and _Hash_node containing:
124 *
125 * - _Hash_node* _M_next
126 * - Tp _M_value
127 * - size_t _M_hash_code if cache_hash_code is true
128 *
129 * In terms of Standard containers the hashtable is like the aggregation of:
130 *
131 * - std::forward_list<_Node> containing the elements
132 * - std::vector<std::forward_list<_Node>::iterator> representing the buckets
133 *
134 * The non-empty buckets contain the node before the first node in the
135 * bucket. This design makes it possible to implement something like a
136 * std::forward_list::insert_after on container insertion and
137 * std::forward_list::erase_after on container erase
138 * calls. _M_before_begin is equivalent to
139 * std::forward_list::before_begin. Empty buckets contain
140 * nullptr. Note that one of the non-empty buckets contains
141 * &_M_before_begin which is not a dereferenceable node so the
142 * node pointer in a bucket shall never be dereferenced, only its
143 * next node can be.
144 *
145 * Walking through a bucket's nodes requires a check on the hash code to
146 * see if each node is still in the bucket. Such a design assumes a
147 * quite efficient hash functor and is one of the reasons it is
148 * highly advisable to set __cache_hash_code to true.
149 *
150 * The container iterators are simply built from nodes. This way
151 * incrementing the iterator is perfectly efficient independent of
152 * how many empty buckets there are in the container.
153 *
154 * On insert we compute the element's hash code and use it to find the
155 * bucket index. If the element must be inserted in an empty bucket
156 * we add it at the beginning of the singly linked list and make the
157 * bucket point to _M_before_begin. The bucket that used to point to
158 * _M_before_begin, if any, is updated to point to its new before
159 * begin node.
160 *
161 * Note that all equivalent values, if any, are next to each other, if
162 * we find a non-equivalent value after an equivalent one it means that
163 * we won't find any new equivalent value.
164 *
165 * On erase, the simple iterator design requires using the hash
166 * functor to get the index of the bucket to update. For this
167 * reason, when __cache_hash_code is set to false the hash functor must
168 * not throw and this is enforced by a static assertion.
169 *
170 * Functionality is implemented by decomposition into base classes,
171 * where the derived _Hashtable class is used in _Map_base and
172 * _Rehash_base base classes to access the
173 * "this" pointer. _Hashtable_base is used in the base classes as a
174 * non-recursive, fully-completed-type so that detailed nested type
175 * information, such as iterator type and node type, can be
176 * used. This is similar to the "Curiously Recurring Template
177 * Pattern" (CRTP) technique, but uses a reconstructed, not
178 * explicitly passed, template pattern.
179 *
180 * Base class templates are:
181 * - __detail::_Hashtable_base
182 * - __detail::_Map_base
183 * - __detail::_Rehash_base
184 */
185 template<typename _Key, typename _Value, typename _Alloc,
186 typename _ExtractKey, typename _Equal,
187 typename _Hash, typename _RangeHash, typename _Unused,
188 typename _RehashPolicy, typename _Traits>
189 class _Hashtable
190 : public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
191 _Hash, _RangeHash, _Unused, _Traits>,
192 public __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
193 _Hash, _RangeHash, _Unused,
194 _RehashPolicy, _Traits>,
195 public __detail::_Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
196 _Hash, _RangeHash, _Unused,
197 _RehashPolicy, _Traits>,
198 private __detail::_Hashtable_alloc<
199 __alloc_rebind<_Alloc,
200 __detail::_Hash_node<_Value,
201 _Traits::__hash_cached::value>>>,
202 private _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>
203 {
204 static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
205 "unordered container must have a non-const, non-volatile value_type");
206#if __cplusplus > 201703L || defined __STRICT_ANSI__
207 static_assert(is_same<typename _Alloc::value_type, _Value>{},
208 "unordered container must have the same value_type as its allocator");
209#endif
210 static_assert(is_copy_constructible<_Hash>::value,
211 "hash function must be copy constructible");
212
213 using __traits_type = _Traits;
214 using __hash_cached = typename __traits_type::__hash_cached;
215 using __constant_iterators = typename __traits_type::__constant_iterators;
216 using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
217 using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
218
219 using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
220
221 using __node_value_type =
222 __detail::_Hash_node_value<_Value, __hash_cached::value>;
223 using __node_ptr = typename __hashtable_alloc::__node_ptr;
224 using __value_alloc_traits =
225 typename __hashtable_alloc::__value_alloc_traits;
226 using __node_alloc_traits =
227 typename __hashtable_alloc::__node_alloc_traits;
228 using __node_base = typename __hashtable_alloc::__node_base;
229 using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
230 using __buckets_ptr = typename __hashtable_alloc::__buckets_ptr;
231
232 using __enable_default_ctor
233 = _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>;
234 using __rehash_guard_t
235 = __detail::_RehashStateGuard<_RehashPolicy>;
236
237 public:
238 typedef _Key key_type;
239 typedef _Value value_type;
240 typedef _Alloc allocator_type;
241 typedef _Equal key_equal;
242
243 // mapped_type, if present, comes from _Map_base.
244 // hasher, if present, comes from _Hash_code_base/_Hashtable_base.
245 typedef typename __value_alloc_traits::pointer pointer;
246 typedef typename __value_alloc_traits::const_pointer const_pointer;
247 typedef value_type& reference;
248 typedef const value_type& const_reference;
249
250 using iterator
251 = __detail::_Node_iterator<_Value, __constant_iterators::value,
252 __hash_cached::value>;
253
254 using const_iterator
255 = __detail::_Node_const_iterator<_Value, __constant_iterators::value,
256 __hash_cached::value>;
257
258 using local_iterator = __detail::_Local_iterator<key_type, _Value,
259 _ExtractKey, _Hash, _RangeHash, _Unused,
260 __constant_iterators::value,
261 __hash_cached::value>;
262
263 using const_local_iterator = __detail::_Local_const_iterator<
264 key_type, _Value,
265 _ExtractKey, _Hash, _RangeHash, _Unused,
266 __constant_iterators::value, __hash_cached::value>;
267
268 private:
269 using __rehash_type = _RehashPolicy;
270
271 using __unique_keys = typename __traits_type::__unique_keys;
272
273 using __hashtable_base = __detail::
274 _Hashtable_base<_Key, _Value, _ExtractKey,
275 _Equal, _Hash, _RangeHash, _Unused, _Traits>;
276
277 using __hash_code_base = typename __hashtable_base::__hash_code_base;
278 using __hash_code = typename __hashtable_base::__hash_code;
279 using __ireturn_type = __conditional_t<__unique_keys::value,
281 iterator>;
282
283 using __map_base = __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey,
284 _Equal, _Hash, _RangeHash, _Unused,
285 _RehashPolicy, _Traits>;
286
287 using __rehash_base = __detail::_Rehash_base<_Key, _Value, _Alloc,
288 _ExtractKey, _Equal,
289 _Hash, _RangeHash, _Unused,
290 _RehashPolicy, _Traits>;
291
292 using __node_builder_t = __detail::_NodeBuilder<_ExtractKey>;
293
294 // Simple RAII type for managing a node containing an element
295 struct _Scoped_node
296 {
297 // Take ownership of a node with a constructed element.
298 _Scoped_node(__node_ptr __n, __hashtable_alloc* __h)
299 : _M_h(__h), _M_node(__n) { }
300
301 // Allocate a node and construct an element within it.
302 template<typename... _Args>
303 _Scoped_node(__hashtable_alloc* __h, _Args&&... __args)
304 : _M_h(__h),
305 _M_node(__h->_M_allocate_node(std::forward<_Args>(__args)...))
306 { }
307
308 // Destroy element and deallocate node.
309 ~_Scoped_node() { if (_M_node) _M_h->_M_deallocate_node(_M_node); };
310
311 _Scoped_node(const _Scoped_node&) = delete;
312 _Scoped_node& operator=(const _Scoped_node&) = delete;
313
314 __hashtable_alloc* _M_h;
315 __node_ptr _M_node;
316 };
317
318 // Compile-time diagnostics.
319
320 // _Hash_code_base has everything protected, so use this derived type to
321 // access it.
322 struct __hash_code_base_access : __hash_code_base
323 { using __hash_code_base::_M_bucket_index; };
324
325 // To get bucket index we need _RangeHash to be non-throwing.
326 static_assert(is_nothrow_default_constructible<_RangeHash>::value,
327 "Functor used to map hash code to bucket index"
328 " must be nothrow default constructible");
329 static_assert(noexcept(
330 std::declval<const _RangeHash&>()((std::size_t)0, (std::size_t)0)),
331 "Functor used to map hash code to bucket index must be"
332 " noexcept");
333
334 // To compute bucket index we also need _ExtractKey to be non-throwing.
335 static_assert(is_nothrow_default_constructible<_ExtractKey>::value,
336 "_ExtractKey must be nothrow default constructible");
337 static_assert(noexcept(
339 "_ExtractKey functor must be noexcept invocable");
340
341 template<typename _Keya, typename _Valuea, typename _Alloca,
342 typename _ExtractKeya, typename _Equala,
343 typename _Hasha, typename _RangeHasha, typename _Unuseda,
344 typename _RehashPolicya, typename _Traitsa,
345 bool _Unique_keysa>
346 friend struct __detail::_Map_base;
347
348 public:
349 using size_type = typename __hashtable_base::size_type;
350 using difference_type = typename __hashtable_base::difference_type;
351
352#ifdef __glibcxx_node_extract // >= C++17 && HOSTED
353 using node_type = _Node_handle<_Key, _Value, __node_alloc_type>;
354 using insert_return_type = _Node_insert_return<iterator, node_type>;
355#endif
356
357 private:
358 __buckets_ptr _M_buckets = &_M_single_bucket;
359 size_type _M_bucket_count = 1;
360 __node_base _M_before_begin;
361 size_type _M_element_count = 0;
362 _RehashPolicy _M_rehash_policy;
363
364 // A single bucket used when only need for 1 bucket. Especially
365 // interesting in move semantic to leave hashtable with only 1 bucket
366 // which is not allocated so that we can have those operations noexcept
367 // qualified.
368 // Note that we can't leave hashtable with 0 bucket without adding
369 // numerous checks in the code to avoid 0 modulus.
370 __node_base_ptr _M_single_bucket = nullptr;
371
372 void
373 _M_update_bbegin()
374 {
375 if (auto __begin = _M_begin())
376 _M_buckets[_M_bucket_index(*__begin)] = &_M_before_begin;
377 }
378
379 void
380 _M_update_bbegin(__node_ptr __n)
381 {
382 _M_before_begin._M_nxt = __n;
383 _M_update_bbegin();
384 }
385
386 bool
387 _M_uses_single_bucket(__buckets_ptr __bkts) const
388 { return __builtin_expect(__bkts == &_M_single_bucket, false); }
389
390 bool
391 _M_uses_single_bucket() const
392 { return _M_uses_single_bucket(_M_buckets); }
393
394 static constexpr size_t
395 __small_size_threshold() noexcept
396 {
397 return
398 __detail::_Hashtable_hash_traits<_Hash>::__small_size_threshold();
399 }
400
401 __hashtable_alloc&
402 _M_base_alloc() { return *this; }
403
404 __buckets_ptr
405 _M_allocate_buckets(size_type __bkt_count)
406 {
407 if (__builtin_expect(__bkt_count == 1, false))
408 {
409 _M_single_bucket = nullptr;
410 return &_M_single_bucket;
411 }
412
413 return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
414 }
415
416 void
417 _M_deallocate_buckets(__buckets_ptr __bkts, size_type __bkt_count)
418 {
419 if (_M_uses_single_bucket(__bkts))
420 return;
421
422 __hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
423 }
424
425 void
426 _M_deallocate_buckets()
427 { _M_deallocate_buckets(_M_buckets, _M_bucket_count); }
428
429 // Gets bucket begin, deals with the fact that non-empty buckets contain
430 // their before begin node.
431 __node_ptr
432 _M_bucket_begin(size_type __bkt) const
433 {
434 __node_base_ptr __n = _M_buckets[__bkt];
435 return __n ? static_cast<__node_ptr>(__n->_M_nxt) : nullptr;
436 }
437
438 __node_ptr
439 _M_begin() const
440 { return static_cast<__node_ptr>(_M_before_begin._M_nxt); }
441
442 // Assign *this using another _Hashtable instance. Whether elements
443 // are copied or moved depends on the _Ht reference.
444 template<typename _Ht>
445 void
446 _M_assign_elements(_Ht&&);
447
448 template<typename _Ht>
449 void
450 _M_assign(_Ht&& __ht)
451 {
452 __detail::_AllocNode<__node_alloc_type> __alloc_node_gen(*this);
453 _M_assign(std::forward<_Ht>(__ht), __alloc_node_gen);
454 }
455
456 template<typename _Ht, typename _NodeGenerator>
457 void
458 _M_assign(_Ht&&, _NodeGenerator&);
459
460 void
461 _M_move_assign(_Hashtable&&, true_type);
462
463 void
464 _M_move_assign(_Hashtable&&, false_type);
465
466 void
467 _M_reset() noexcept;
468
469 _Hashtable(const _Hash& __h, const _Equal& __eq,
470 const allocator_type& __a)
471 : __hashtable_base(__h, __eq),
472 __hashtable_alloc(__node_alloc_type(__a)),
473 __enable_default_ctor(_Enable_default_constructor_tag{})
474 { }
475
476 template<bool _No_realloc = true>
477 static constexpr bool
478 _S_nothrow_move()
479 {
480#if __cpp_constexpr >= 201304 // >= C++14
481# pragma GCC diagnostic push
482# pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
483 if constexpr (_No_realloc)
484 if constexpr (is_nothrow_copy_constructible<_Hash>::value)
485 return is_nothrow_copy_constructible<_Equal>::value;
486 return false;
487# pragma GCC diagnostic pop
488#else // In C++11 a constexpr function must be a single statement.
489 return __and_<__bool_constant<_No_realloc>,
490 is_nothrow_copy_constructible<_Hash>,
491 is_nothrow_copy_constructible<_Equal>>::value;
492#endif
493 }
494
495 _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
496 true_type /* alloc always equal */)
497 noexcept(_S_nothrow_move());
498
499 _Hashtable(_Hashtable&&, __node_alloc_type&&,
500 false_type /* alloc always equal */);
501
502 template<typename _InputIterator>
503 _Hashtable(_InputIterator __first, _InputIterator __last,
504 size_type __bkt_count_hint,
505 const _Hash&, const _Equal&, const allocator_type&,
506 true_type __uks);
507
508 template<typename _InputIterator>
509 _Hashtable(_InputIterator __first, _InputIterator __last,
510 size_type __bkt_count_hint,
511 const _Hash&, const _Equal&, const allocator_type&,
512 false_type __uks);
513
514 public:
515 // Constructor, destructor, assignment, swap
516 _Hashtable() = default;
517
518 _Hashtable(const _Hashtable&);
519
520 _Hashtable(const _Hashtable&, const allocator_type&);
521
522 explicit
523 _Hashtable(size_type __bkt_count_hint,
524 const _Hash& __hf = _Hash(),
525 const key_equal& __eql = key_equal(),
526 const allocator_type& __a = allocator_type());
527
528 // Use delegating constructors.
529 _Hashtable(_Hashtable&& __ht)
530 noexcept(_S_nothrow_move())
531 : _Hashtable(std::move(__ht), std::move(__ht._M_node_allocator()),
532 true_type{})
533 { }
534
535 _Hashtable(_Hashtable&& __ht, const allocator_type& __a)
536 noexcept(_S_nothrow_move<__node_alloc_traits::_S_always_equal()>())
537 : _Hashtable(std::move(__ht), __node_alloc_type(__a),
538 typename __node_alloc_traits::is_always_equal{})
539 { }
540
541 explicit
542 _Hashtable(const allocator_type& __a)
543 : __hashtable_alloc(__node_alloc_type(__a)),
544 __enable_default_ctor(_Enable_default_constructor_tag{})
545 { }
546
547 template<typename _InputIterator>
548 _Hashtable(_InputIterator __f, _InputIterator __l,
549 size_type __bkt_count_hint = 0,
550 const _Hash& __hf = _Hash(),
551 const key_equal& __eql = key_equal(),
552 const allocator_type& __a = allocator_type())
553 : _Hashtable(__f, __l, __bkt_count_hint, __hf, __eql, __a,
554 __unique_keys{})
555 { }
556
557 _Hashtable(initializer_list<value_type> __l,
558 size_type __bkt_count_hint = 0,
559 const _Hash& __hf = _Hash(),
560 const key_equal& __eql = key_equal(),
561 const allocator_type& __a = allocator_type())
562 : _Hashtable(__l.begin(), __l.end(), __bkt_count_hint,
563 __hf, __eql, __a, __unique_keys{})
564 { }
565
566 _Hashtable&
567 operator=(const _Hashtable& __ht);
568
569 _Hashtable&
570 operator=(_Hashtable&& __ht)
571 noexcept(__node_alloc_traits::_S_nothrow_move()
572 && is_nothrow_move_assignable<_Hash>::value
573 && is_nothrow_move_assignable<_Equal>::value)
574 {
575 constexpr bool __move_storage =
576 __node_alloc_traits::_S_propagate_on_move_assign()
577 || __node_alloc_traits::_S_always_equal();
578 _M_move_assign(std::move(__ht), __bool_constant<__move_storage>());
579 return *this;
580 }
581
582#pragma GCC diagnostic push
583#pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
584 _Hashtable&
585 operator=(initializer_list<value_type> __l)
586 {
587 using __reuse_or_alloc_node_gen_t =
588 __detail::_ReuseOrAllocNode<__node_alloc_type>;
589
590 __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
591 _M_before_begin._M_nxt = nullptr;
592 clear();
593
594 // We assume that all elements of __l are likely to be inserted.
595 auto __l_bkt_count = _M_rehash_policy._M_bkt_for_elements(__l.size());
596
597 // Excess buckets might have been intentionally reserved by the user,
598 // so rehash if we need to grow, but don't shrink.
599 if (_M_bucket_count < __l_bkt_count)
600 rehash(__l_bkt_count);
601
602 __hash_code __code;
603 size_type __bkt;
604 for (auto& __e : __l)
605 {
606 const key_type& __k = _ExtractKey{}(__e);
607
608 if constexpr (__unique_keys::value)
609 {
610 if (auto __loc = _M_locate(__k))
611 continue; // Found existing element with equivalent key
612 else
613 {
614 __code = __loc._M_hash_code;
615 __bkt = __loc._M_bucket_index;
616 }
617 }
618 else
619 {
620 __code = this->_M_hash_code(__k);
621 __bkt = _M_bucket_index(__code);
622 }
623
624 _M_insert_unique_node(__bkt, __code, __roan(__e));
625 }
626
627 return *this;
628 }
629#pragma GCC diagnostic pop
630
631 ~_Hashtable() noexcept;
632
633 void
634 swap(_Hashtable&)
635 noexcept(__and_<__is_nothrow_swappable<_Hash>,
636 __is_nothrow_swappable<_Equal>>::value);
637
638 // Basic container operations
639 iterator
640 begin() noexcept
641 { return iterator(_M_begin()); }
642
643 const_iterator
644 begin() const noexcept
645 { return const_iterator(_M_begin()); }
646
647 iterator
648 end() noexcept
649 { return iterator(nullptr); }
650
651 const_iterator
652 end() const noexcept
653 { return const_iterator(nullptr); }
654
655 const_iterator
656 cbegin() const noexcept
657 { return const_iterator(_M_begin()); }
658
659 const_iterator
660 cend() const noexcept
661 { return const_iterator(nullptr); }
662
663 size_type
664 size() const noexcept
665 { return _M_element_count; }
666
667 _GLIBCXX_NODISCARD bool
668 empty() const noexcept
669 { return size() == 0; }
670
671 allocator_type
672 get_allocator() const noexcept
673 { return allocator_type(this->_M_node_allocator()); }
674
675 size_type
676 max_size() const noexcept
677 { return __node_alloc_traits::max_size(this->_M_node_allocator()); }
678
679 // Observers
680 key_equal
681 key_eq() const
682 { return this->_M_eq(); }
683
684 // hash_function, if present, comes from _Hash_code_base.
685
686 // Bucket operations
687 size_type
688 bucket_count() const noexcept
689 { return _M_bucket_count; }
690
691 size_type
692 max_bucket_count() const noexcept
693 { return max_size(); }
694
695 size_type
696 bucket_size(size_type __bkt) const
697 { return std::distance(begin(__bkt), end(__bkt)); }
698
699 size_type
700 bucket(const key_type& __k) const
701 { return _M_bucket_index(this->_M_hash_code(__k)); }
702
703 local_iterator
704 begin(size_type __bkt)
705 {
706 return local_iterator(*this, _M_bucket_begin(__bkt),
707 __bkt, _M_bucket_count);
708 }
709
710 local_iterator
711 end(size_type __bkt)
712 { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
713
714 const_local_iterator
715 begin(size_type __bkt) const
716 {
717 return const_local_iterator(*this, _M_bucket_begin(__bkt),
718 __bkt, _M_bucket_count);
719 }
720
721 const_local_iterator
722 end(size_type __bkt) const
723 { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
724
725 // DR 691.
726 const_local_iterator
727 cbegin(size_type __bkt) const
728 {
729 return const_local_iterator(*this, _M_bucket_begin(__bkt),
730 __bkt, _M_bucket_count);
731 }
732
733 const_local_iterator
734 cend(size_type __bkt) const
735 { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
736
737 float
738 load_factor() const noexcept
739 {
740 return static_cast<float>(size()) / static_cast<float>(bucket_count());
741 }
742
743 // max_load_factor, if present, comes from _Rehash_base.
744
745 // Generalization of max_load_factor. Extension, not found in
746 // TR1. Only useful if _RehashPolicy is something other than
747 // the default.
748 const _RehashPolicy&
749 __rehash_policy() const
750 { return _M_rehash_policy; }
751
752 void
753 __rehash_policy(const _RehashPolicy& __pol)
754 { _M_rehash_policy = __pol; }
755
756 // Lookup.
757 iterator
758 find(const key_type& __k);
759
760 const_iterator
761 find(const key_type& __k) const;
762
763 size_type
764 count(const key_type& __k) const;
765
767 equal_range(const key_type& __k);
768
770 equal_range(const key_type& __k) const;
771
772#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED
773 template<typename _Kt,
774 typename = __has_is_transparent_t<_Hash, _Kt>,
775 typename = __has_is_transparent_t<_Equal, _Kt>>
776 iterator
777 _M_find_tr(const _Kt& __k);
778
779 template<typename _Kt,
780 typename = __has_is_transparent_t<_Hash, _Kt>,
781 typename = __has_is_transparent_t<_Equal, _Kt>>
782 const_iterator
783 _M_find_tr(const _Kt& __k) const;
784
785 template<typename _Kt,
786 typename = __has_is_transparent_t<_Hash, _Kt>,
787 typename = __has_is_transparent_t<_Equal, _Kt>>
788 size_type
789 _M_count_tr(const _Kt& __k) const;
790
791 template<typename _Kt,
792 typename = __has_is_transparent_t<_Hash, _Kt>,
793 typename = __has_is_transparent_t<_Equal, _Kt>>
794 pair<iterator, iterator>
795 _M_equal_range_tr(const _Kt& __k);
796
797 template<typename _Kt,
798 typename = __has_is_transparent_t<_Hash, _Kt>,
799 typename = __has_is_transparent_t<_Equal, _Kt>>
800 pair<const_iterator, const_iterator>
801 _M_equal_range_tr(const _Kt& __k) const;
802#endif // __glibcxx_generic_unordered_lookup
803
804 void _M_rehash_insert(size_type __n);
805
806 private:
807 // Bucket index computation helpers.
808 size_type
809 _M_bucket_index(const __node_value_type& __n) const noexcept
810 { return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }
811
812 size_type
813 _M_bucket_index(__hash_code __c) const
814 { return __hash_code_base::_M_bucket_index(__c, _M_bucket_count); }
815
816#pragma GCC diagnostic push
817#pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
818 // Get hash code for a node that comes from another _Hashtable.
819 // Reuse a cached hash code if the hash function is stateless,
820 // otherwise recalculate it using our own hash function.
821 __hash_code
822 _M_hash_code_ext(const __node_value_type& __from) const
823 {
824 if constexpr (__and_<__hash_cached, is_empty<_Hash>>::value)
825 return __from._M_hash_code;
826 else
827 return this->_M_hash_code(_ExtractKey{}(__from._M_v()));
828 }
829
830 // Like _M_bucket_index but when the node is coming from another
831 // container instance.
832 size_type
833 _M_bucket_index_ext(const __node_value_type& __from) const
834 { return _RangeHash{}(_M_hash_code_ext(__from), _M_bucket_count); }
835
836 void
837 _M_copy_code(__node_value_type& __to,
838 const __node_value_type& __from) const
839 {
840 if constexpr (__hash_cached::value)
841 __to._M_hash_code = _M_hash_code_ext(__from);
842 }
843
844 void
845 _M_store_code(__node_value_type& __to, __hash_code __code) const
846 {
847 if constexpr (__hash_cached::value)
848 __to._M_hash_code = __code;
849 }
850#pragma GCC diagnostic pop
851
852 // Find and insert helper functions and types
853
854 // Find the node before the one matching the criteria.
855 __node_base_ptr
856 _M_find_before_node(size_type, const key_type&, __hash_code) const;
857
858 template<typename _Kt>
859 __node_base_ptr
860 _M_find_before_node_tr(size_type, const _Kt&, __hash_code) const;
861
862 // A pointer to a particular node and/or a hash code and bucket index
863 // where such a node would be found in the container.
864 struct __location_type
865 {
866 // True if _M_node() is a valid node pointer.
867 explicit operator bool() const noexcept
868 { return static_cast<bool>(_M_before); }
869
870 // An iterator that refers to the node, or end().
871 explicit operator iterator() const noexcept
872 { return iterator(_M_node()); }
873
874 // A const_iterator that refers to the node, or cend().
875 explicit operator const_iterator() const noexcept
876 { return const_iterator(_M_node()); }
877
878 // A pointer to the node, or null.
879 __node_ptr _M_node() const
880 {
881 if (_M_before)
882 return static_cast<__node_ptr>(_M_before->_M_nxt);
883 return __node_ptr();
884 }
885
886 __node_base_ptr _M_before{}; // Must only be used to get _M_nxt
887 __hash_code _M_hash_code{}; // Only valid if _M_bucket_index != -1
888 size_type _M_bucket_index = size_type(-1);
889 };
890
891 // Adaptive lookup to find key, or which bucket it would be in.
892 // For a container smaller than the small size threshold use a linear
893 // search through the whole container, just testing for equality.
894 // Otherwise, calculate the hash code and bucket index for the key,
895 // and search in that bucket.
896 // The return value will have a pointer to the node _before_ the first
897 // node matching the key, if any such node exists. Returning the node
898 // before the desired one allows the result to be used for erasure.
899 // If no matching element is present, the hash code and bucket for the
900 // key will be set, allowing a new node to be inserted at that location.
901 // (The hash code and bucket might also be set when a node is found.)
902 // The _M_before pointer might point to _M_before_begin, so must not be
903 // cast to __node_ptr, and it must not be used to modify *_M_before
904 // except in non-const member functions, such as erase.
905 __location_type
906 _M_locate(const key_type& __k) const;
907
908 __node_ptr
909 _M_find_node(size_type __bkt, const key_type& __key,
910 __hash_code __c) const
911 {
912 if (__node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c))
913 return static_cast<__node_ptr>(__before_n->_M_nxt);
914 return nullptr;
915 }
916
917 template<typename _Kt>
918 __node_ptr
919 _M_find_node_tr(size_type __bkt, const _Kt& __key,
920 __hash_code __c) const
921 {
922 if (auto __before_n = _M_find_before_node_tr(__bkt, __key, __c))
923 return static_cast<__node_ptr>(__before_n->_M_nxt);
924 return nullptr;
925 }
926
      // Insert a node at the beginning of a bucket.
      // Maintains the invariant that _M_buckets[b] points to the node
      // *before* the first node of bucket b (possibly &_M_before_begin).
      void
      _M_insert_bucket_begin(size_type __bkt, __node_ptr __node)
      {
	if (_M_buckets[__bkt])
	  {
	    // Bucket is not empty, we just need to insert the new node
	    // after the bucket before begin.
	    __node->_M_nxt = _M_buckets[__bkt]->_M_nxt;
	    _M_buckets[__bkt]->_M_nxt = __node;
	  }
	else
	  {
	    // The bucket is empty, the new node is inserted at the
	    // beginning of the singly-linked list and the bucket will
	    // contain _M_before_begin pointer.
	    __node->_M_nxt = _M_before_begin._M_nxt;
	    _M_before_begin._M_nxt = __node;

	    if (__node->_M_nxt)
	      // We must update former begin bucket that is pointing to
	      // _M_before_begin.
	      _M_buckets[_M_bucket_index(*__node->_M_next())] = __node;

	    _M_buckets[__bkt] = &_M_before_begin;
	  }
      }
954
955 // Remove the bucket first node
956 void
957 _M_remove_bucket_begin(size_type __bkt, __node_ptr __next_n,
958 size_type __next_bkt)
959 {
960 if (!__next_n)
961 _M_buckets[__bkt] = nullptr;
962 else if (__next_bkt != __bkt)
963 {
964 _M_buckets[__next_bkt] = _M_buckets[__bkt];
965 _M_buckets[__bkt] = nullptr;
966 }
967 }
968
      // Get the node before __n in the bucket __bkt
      __node_base_ptr
      _M_get_previous_node(size_type __bkt, __node_ptr __n);

      // Compute the hash code for __k; __hint is available to the
      // out-of-line definition (see there for how it is used).
      pair<__node_ptr, __hash_code>
      _M_compute_hash_code(__node_ptr __hint, const key_type& __k) const;

      // Insert node __n with hash code __code, in bucket __bkt (or another
      // bucket if rehashing is needed).
      // Assumes no element with equivalent key is already present.
      // Takes ownership of __n if insertion succeeds, throws otherwise.
      // __n_elt is an estimated number of elements we expect to insert,
      // used as a hint for rehashing when inserting a range.
      iterator
      _M_insert_unique_node(size_type __bkt, __hash_code,
			    __node_ptr __n, size_type __n_elt = 1);

      // Insert node __n with key __k and hash code __code.
      // Takes ownership of __n if insertion succeeds, throws otherwise.
      iterator
      _M_insert_multi_node(__node_ptr __hint,
			   __hash_code __code, __node_ptr __n);

      // Emplace with unique-keys semantics; callers use the result's
      // .first member (see emplace_hint below).
      // NOTE(review): the return-type line appears to be missing from this
      // copy of the file — verify against the upstream source.
      template<typename... _Args>
	_M_emplace_uniq(_Args&&... __args);

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++14-extensions" // variable templates
      // __is_key_type<_Arg> is true when _Arg is (a cv/ref-qualified)
      // key_type itself (sets, _Identity extractor), or a pair whose first
      // member is the key_type (maps, _Select1st extractor).
      template<typename _Arg, typename _DArg = __remove_cvref_t<_Arg>,
	       typename = _ExtractKey>
	static constexpr bool __is_key_type = false;

      template<typename _Arg>
	static constexpr bool
	__is_key_type<_Arg, key_type, __detail::_Identity> = true;

      template<typename _Arg, typename _Arg1, typename _Arg2>
	static constexpr bool
	__is_key_type<_Arg, pair<_Arg1, _Arg2>, __detail::_Select1st>
	  = is_same<__remove_cvref_t<_Arg1>, key_type>::value;
#pragma GCC diagnostic pop

      // Emplace with equivalent-keys semantics; the const_iterator is an
      // insertion hint.
      template<typename... _Args>
	iterator
	_M_emplace_multi(const_iterator, _Args&&... __args);

      // Erase node __n in bucket __bkt; __prev_n is the node before it.
      iterator
      _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);

      // Insert [__first, __last) with equivalent-keys semantics.
      template<typename _InputIterator>
	void
	_M_insert_range_multi(_InputIterator __first, _InputIterator __last);

    public:
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
      // Emplace
      template<typename... _Args>
	__ireturn_type
	emplace(_Args&&... __args)
	{
	  if constexpr (__unique_keys::value)
	    return _M_emplace_uniq(std::forward<_Args>(__args)...);
	  else
	    return _M_emplace_multi(cend(), std::forward<_Args>(__args)...);
	}

      // The hint is only forwarded for the equivalent-keys case; with
      // unique keys the iterator part of _M_emplace_uniq's result is used.
      template<typename... _Args>
	iterator
	emplace_hint(const_iterator __hint, _Args&&... __args)
	{
	  if constexpr (__unique_keys::value)
	    return _M_emplace_uniq(std::forward<_Args>(__args)...).first;
	  else
	    return _M_emplace_multi(__hint, std::forward<_Args>(__args)...);
	}

      // Insert
      __ireturn_type
      insert(const value_type& __v)
      {
	if constexpr (__unique_keys::value)
	  return _M_emplace_uniq(__v);
	else
	  return _M_emplace_multi(cend(), __v);
      }

      iterator
      insert(const_iterator __hint, const value_type& __v)
      {
	if constexpr (__unique_keys::value)
	  return _M_emplace_uniq(__v).first;
	else
	  return _M_emplace_multi(__hint, __v);
      }

      __ireturn_type
      insert(value_type&& __v)
      {
	if constexpr (__unique_keys::value)
	  return _M_emplace_uniq(std::move(__v));
	else
	  return _M_emplace_multi(cend(), std::move(__v));
      }

      iterator
      insert(const_iterator __hint, value_type&& __v)
      {
	if constexpr (__unique_keys::value)
	  return _M_emplace_uniq(std::move(__v)).first;
	else
	  return _M_emplace_multi(__hint, std::move(__v));
      }
1083
#ifdef __glibcxx_unordered_map_try_emplace // C++ >= 17 && HOSTED
      // try_emplace: if __k is already present do nothing, otherwise insert
      // a new element constructed from __k and __args (the hint iterator is
      // ignored).
      // NOTE(review): the return-type line and part of the _Scoped_node
      // initializer appear to be missing from this copy of the file —
      // verify against the upstream source.
      template<typename _KType, typename... _Args>
	try_emplace(const_iterator, _KType&& __k, _Args&&... __args)
	{
	  __hash_code __code;
	  size_type __bkt;
	  // _M_locate's result converts to true when the key was found.
	  if (auto __loc = _M_locate(__k))
	    return { iterator(__loc), false };
	  else
	    {
	      // Not found: _M_locate has computed hash code and bucket.
	      __code = __loc._M_hash_code;
	      __bkt = __loc._M_bucket_index;
	    }

	  _Scoped_node __node {
	    this,
	  };
	  auto __it = _M_insert_unique_node(__bkt, __code, __node._M_node);
	  // The table now owns the node; disarm the scoped guard.
	  __node._M_node = nullptr;
	  return { __it, true };
	}
#endif

      void
      insert(initializer_list<value_type> __l)
      { this->insert(__l.begin(), __l.end()); }

      // Range insert.  For unique keys each element goes through
      // _M_emplace_uniq; duplicates are silently skipped by it.
      template<typename _InputIterator>
	void
	insert(_InputIterator __first, _InputIterator __last)
	{
	  if constexpr (__unique_keys::value)
	    for (; __first != __last; ++__first)
	      _M_emplace_uniq(*__first);
	  else
	    return _M_insert_range_multi(__first, __last);
	}

      // This overload is only defined for maps, not sets.
      template<typename _Pair,
	       typename = _Require<__not_<is_same<_Key, _Value>>,
				   is_constructible<value_type, _Pair&&>>>
	__ireturn_type
	insert(_Pair&& __v)
	{
	  if constexpr (__unique_keys::value)
	    return _M_emplace_uniq(std::forward<_Pair>(__v));
	  else
	    return _M_emplace_multi(cend(), std::forward<_Pair>(__v));
	}
1138
1139 // This overload is only defined for maps, not sets.
1140 template<typename _Pair,
1141 typename = _Require<__not_<is_same<_Key, _Value>>,
1142 is_constructible<value_type, _Pair&&>>>
1143 iterator
1144 insert(const_iterator __hint, _Pair&& __v)
1145 {
1146 if constexpr (__unique_keys::value)
1147 return _M_emplace_uniq(std::forward<_Pair>(__v));
1148 else
1149 return _M_emplace_multi(__hint, std::forward<_Pair>(__v));
1150 }
1151#pragma GCC diagnostic pop
1152
      // Erase
      iterator
      erase(const_iterator);

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 2059. C++0x ambiguity problem with map::erase
      iterator
      erase(iterator __it)
      { return erase(const_iterator(__it)); }

      // Erase all elements with key __k; returns the number erased.
      size_type
      erase(const key_type& __k);

      // Erase the range [first, last).
      iterator
      erase(const_iterator, const_iterator);

      void
      clear() noexcept;

      // Set number of buckets keeping it appropriate for container's number
      // of elements.
      void rehash(size_type __bkt_count);

      // DR 1189.
      // reserve, if present, comes from _Rehash_base.

#if __glibcxx_node_extract // >= C++17 && HOSTED
      /// Re-insert an extracted node into a container with unique keys.
      insert_return_type
      _M_reinsert_node(node_type&& __nh)
      {
	insert_return_type __ret;
	if (__nh.empty())
	  __ret.position = end();
	else
	  {
	    __glibcxx_assert(get_allocator() == __nh.get_allocator());

	    if (auto __loc = _M_locate(__nh._M_key()))
	      {
		// Key already present: hand the node back to the caller.
		__ret.node = std::move(__nh);
		__ret.position = iterator(__loc);
		__ret.inserted = false;
	      }
	    else
	      {
		// Not found: _M_locate computed hash code and bucket.
		auto __code = __loc._M_hash_code;
		auto __bkt = __loc._M_bucket_index;
		__ret.position
		  = _M_insert_unique_node(__bkt, __code, __nh._M_ptr);
		__ret.inserted = true;
		// The table now owns the node.
		__nh.release();
	      }
	  }
	return __ret;
      }
1209
1210 /// Re-insert an extracted node into a container with equivalent keys.
1211 iterator
1212 _M_reinsert_node_multi(const_iterator __hint, node_type&& __nh)
1213 {
1214 if (__nh.empty())
1215 return end();
1216
1217 __glibcxx_assert(get_allocator() == __nh.get_allocator());
1218
1219 const key_type& __k = __nh._M_key();
1220 auto __code = this->_M_hash_code(__k);
1221 auto __ret
1222 = _M_insert_multi_node(__hint._M_cur, __code, __nh._M_ptr);
1223 __nh.release();
1224 return __ret;
1225 }
1226
    private:
      // Unlink the node after __prev_n (in bucket __bkt) from the list,
      // fix up any bucket pointers referring to it, and return it wrapped
      // in a node handle that owns it.
      node_type
      _M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
      {
	__node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
	if (__prev_n == _M_buckets[__bkt])
	  // __n is the first node of its bucket.
	  _M_remove_bucket_begin(__bkt, __n->_M_next(),
	     __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
	else if (__n->_M_nxt)
	  {
	    // If the successor starts a different bucket, that bucket's
	    // before-begin pointer must now be __prev_n.
	    size_type __next_bkt = _M_bucket_index(*__n->_M_next());
	    if (__next_bkt != __bkt)
	      _M_buckets[__next_bkt] = __prev_n;
	  }

	__prev_n->_M_nxt = __n->_M_nxt;
	__n->_M_nxt = nullptr;
	--_M_element_count;
	return { __n, this->_M_node_allocator() };
      }

      // Hash code for node __src_n with key __k, using this->hash_function().
      // Will use a hash code cached in the node if safe to do so. This is
      // for use in _M_merge_multi where the node comes from another container
      // with a hash function that might not match this->hash_function().
      template<typename _H2>
	__hash_code
	_M_src_hash_code(const _H2&, const __node_value_type& __src_n) const
	{
	  if constexpr (__and_<__hash_cached,
			       is_same<_H2, _Hash>, is_empty<_Hash>>::value)
	    // If the node has a cached hash code, it's OK to use it.
	    return __src_n._M_hash_code;
	  else
	    return this->_M_hash_code(_ExtractKey{}(__src_n._M_v()));
	}
1263
1264 public:
1265 // Extract a node.
1266 node_type
1267 extract(const_iterator __pos)
1268 {
1269 size_t __bkt = _M_bucket_index(*__pos._M_cur);
1270 return _M_extract_node(__bkt,
1271 _M_get_previous_node(__bkt, __pos._M_cur));
1272 }
1273
1274 /// Extract a node.
1275 node_type
1276 extract(const _Key& __k)
1277 {
1278 node_type __nh;
1279 __hash_code __code = this->_M_hash_code(__k);
1280 std::size_t __bkt = _M_bucket_index(__code);
1281 if (__node_base_ptr __prev_node = _M_find_before_node(__bkt, __k, __code))
1282 __nh = _M_extract_node(__bkt, __prev_node);
1283 return __nh;
1284 }
1285
      /// Merge from another container of the same type.
      void
      _M_merge_unique(_Hashtable& __src)
      {
	__glibcxx_assert(get_allocator() == __src.get_allocator());

	using _PTr = pointer_traits<__node_base_ptr>;

	auto __n_elt = __src.size();
	// __first is 1 for the first insertion only; the rehash hint
	// passed below (__first * __n_elt + 1) is thus the remaining
	// range size once, then 1 for subsequent insertions.
	size_type __first = 1;
	// For a container of identical type we can use its private members,
	// __src._M_before_begin, __src._M_bucket_index etc.
	auto __prev = _PTr::pointer_to(__src._M_before_begin);
	while (__n_elt--)
	  {
	    const auto __next = __prev->_M_nxt;
	    const auto& __node = static_cast<__node_type&>(*__next);
	    const key_type& __k = _ExtractKey{}(__node._M_v());
	    const auto __loc = _M_locate(__k);
	    if (__loc)
	      {
		// Key already present here: leave the node in __src and
		// advance past it.
		__prev = __next;
		continue;
	      }

	    // Unlink from __src, then insert into *this using the hash
	    // code and bucket computed by _M_locate above.
	    auto __src_bkt = __src._M_bucket_index(__node);
	    auto __nh = __src._M_extract_node(__src_bkt, __prev);
	    _M_insert_unique_node(__loc._M_bucket_index, __loc._M_hash_code,
				  __nh._M_ptr, __first * __n_elt + 1);
	    __nh.release();
	    __first = 0;
	  }
      }

      /// Merge from a compatible container into one with unique keys.
      template<typename _Compatible_Hashtable>
	void
	_M_merge_unique(_Compatible_Hashtable& __src)
	{
	  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
			node_type>, "Node types are compatible");
	  __glibcxx_assert(get_allocator() == __src.get_allocator());

	  auto __n_elt = __src.size();
	  size_type __first = 1;
	  // For a compatible container we can only use the public API,
	  // so cbegin(), cend(), hash_function(), and extract(iterator).
	  for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
	    {
	      --__n_elt;
	      // Advance before extraction invalidates __pos.
	      auto __pos = __i++;
	      const key_type& __k = _ExtractKey{}(*__pos);
	      const auto __loc = _M_locate(__k);
	      if (__loc)
		continue;

	      auto __nh = __src.extract(__pos);
	      _M_insert_unique_node(__loc._M_bucket_index,
				    __loc._M_hash_code, __nh._M_ptr,
				    __first * __n_elt + 1);
	      __nh.release();
	      __first = 0;
	    }
	}
1350
      /// Merge from another container of the same type.
      void
      _M_merge_multi(_Hashtable& __src)
      {
	__glibcxx_assert(get_allocator() == __src.get_allocator());

	if (__src.size() == 0) [[__unlikely__]]
	  return;

	using _PTr = pointer_traits<__node_base_ptr>;

	__node_ptr __hint = nullptr;
	// Reserve up front so insertions below don't rehash one by one.
	this->reserve(size() + __src.size());
	// For a container of identical type we can use its private members,
	// __src._M_before_begin, __src._M_bucket_index etc.
	auto __prev = _PTr::pointer_to(__src._M_before_begin);
	do
	  {
	    const auto& __node = static_cast<__node_type&>(*__prev->_M_nxt);
	    // Hash code from this:
	    auto __code = _M_hash_code_ext(__node);
	    // Bucket index in __src, using code from __src.hash_function():
	    size_type __src_bkt = __src._M_bucket_index(__node);
	    auto __nh = __src._M_extract_node(__src_bkt, __prev);
	    // Use the previous insertion as the hint for the next one.
	    __hint = _M_insert_multi_node(__hint, __code, __nh._M_ptr)._M_cur;
	    __nh.release();
	  }
	while (__prev->_M_nxt != nullptr);
      }

      /// Merge from a compatible container into one with equivalent keys.
      template<typename _Compatible_Hashtable>
	void
	_M_merge_multi(_Compatible_Hashtable& __src)
	{
	  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
			node_type>, "Node types are compatible");
	  __glibcxx_assert(get_allocator() == __src.get_allocator());

	  __node_ptr __hint = nullptr;
	  this->reserve(size() + __src.size());
	  // For a compatible container we can only use the public API,
	  // so cbegin(), cend(), hash_function(), and extract(iterator).
	  for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
	    {
	      // Advance before extraction invalidates __pos.
	      auto __pos = __i++;
	      __hash_code __code
		= _M_src_hash_code(__src.hash_function(), *__pos._M_cur);
	      auto __nh = __src.extract(__pos);
	      __hint = _M_insert_multi_node(__hint, __code, __nh._M_ptr)._M_cur;
	      __nh.release();
	    }
	}
#endif // C++17 __glibcxx_node_extract

      // True iff *this and __other hold the same elements (used by
      // operator== of the unordered containers).
      bool
      _M_equal(const _Hashtable& __other) const;

    private:
      // Helper rehash method used when keys are unique.
      void _M_rehash(size_type __bkt_count, true_type __uks);

      // Helper rehash method used when keys can be non-unique.
      void _M_rehash(size_type __bkt_count, false_type __uks);
    };
1416
1417 // Definitions of class template _Hashtable's out-of-line member functions.
  // Construct with a bucket-count hint: delegate to the functor/allocator
  // constructor, then enlarge the bucket array if the rehash policy asks
  // for more buckets than the default.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(size_type __bkt_count_hint,
	       const _Hash& __h, const _Equal& __eq, const allocator_type& __a)
    : _Hashtable(__h, __eq, __a)
    {
      auto __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count_hint);
      if (__bkt_count > _M_bucket_count)
	{
	  _M_buckets = _M_allocate_buckets(__bkt_count);
	  _M_bucket_count = __bkt_count;
	}
    }

  // Range constructor for unique keys: size buckets from the hint only,
  // then insert the range (duplicates are discarded by insert).
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _InputIterator>
      inline
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _Hashtable(_InputIterator __f, _InputIterator __l,
		 size_type __bkt_count_hint,
		 const _Hash& __h, const _Equal& __eq,
		 const allocator_type& __a, true_type /* __uks */)
      : _Hashtable(__bkt_count_hint, __h, __eq, __a)
      { this->insert(__f, __l); }
1450
  // Range constructor for equivalent keys: every element of the range will
  // be inserted, so size the bucket array from the range length (or the
  // hint if larger) before inserting.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _InputIterator>
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _Hashtable(_InputIterator __f, _InputIterator __l,
		 size_type __bkt_count_hint,
		 const _Hash& __h, const _Equal& __eq,
		 const allocator_type& __a, false_type __uks)
      : _Hashtable(__h, __eq, __a)
      {
	// __distance_fw returns 0 for pure input iterators.
	auto __nb_elems = __detail::__distance_fw(__f, __l);
	auto __bkt_count =
	  _M_rehash_policy._M_next_bkt(
	    std::max(_M_rehash_policy._M_bkt_for_elements(__nb_elems),
		     __bkt_count_hint));

	if (__bkt_count > _M_bucket_count)
	  {
	    _M_buckets = _M_allocate_buckets(__bkt_count);
	    _M_bucket_count = __bkt_count;
	  }

	for (; __f != __l; ++__f)
	  _M_emplace_multi(cend(), *__f);
      }
1479
  // Copy assignment.  When the allocator propagates and compares unequal
  // the old storage must be freed with the old allocator before copying;
  // otherwise buckets and nodes are reused via _M_assign_elements.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    operator=(const _Hashtable& __ht)
    -> _Hashtable&
    {
      if (&__ht == this)
	return *this;

      if (__node_alloc_traits::_S_propagate_on_copy_assign())
	{
	  auto& __this_alloc = this->_M_node_allocator();
	  auto& __that_alloc = __ht._M_node_allocator();
	  if (!__node_alloc_traits::_S_always_equal()
	      && __this_alloc != __that_alloc)
	    {
	      // Replacement allocator cannot free existing storage.
	      this->_M_deallocate_nodes(_M_begin());
	      _M_before_begin._M_nxt = nullptr;
	      _M_deallocate_buckets();
	      _M_buckets = nullptr;
	      std::__alloc_on_copy(__this_alloc, __that_alloc);
	      __hashtable_base::operator=(__ht);
	      _M_bucket_count = __ht._M_bucket_count;
	      _M_element_count = __ht._M_element_count;
	      _M_rehash_policy = __ht._M_rehash_policy;

	      struct _Guard
	      {
		~_Guard() { if (_M_ht) _M_ht->_M_reset(); }
		_Hashtable* _M_ht;
	      };
	      // If _M_assign exits via an exception it will have deallocated
	      // all memory. This guard will ensure *this is in a usable state.
	      _Guard __guard{this};
	      _M_assign(__ht);
	      // Success: disarm the guard.
	      __guard._M_ht = nullptr;
	      return *this;
	    }
	  std::__alloc_on_copy(__this_alloc, __that_alloc);
	}

      // Reuse allocated buckets and nodes.
      _M_assign_elements(__ht);
      return *this;
    }
1530
  // Assign the elements of __ht to *this, reusing existing nodes where
  // possible (via _ReuseOrAllocNode).  Provides the strong guarantee for
  // the bucket array: on exception the former buckets are restored.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Ht>
      void
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_assign_elements(_Ht&& __ht)
      {
	using __reuse_or_alloc_node_gen_t =
	  __detail::_ReuseOrAllocNode<__node_alloc_type>;

	__buckets_ptr __former_buckets = nullptr;
	std::size_t __former_bucket_count = _M_bucket_count;
	// Restores the rehash policy unless disarmed below.
	__rehash_guard_t __rehash_guard(_M_rehash_policy);

	if (_M_bucket_count != __ht._M_bucket_count)
	  {
	    // Need a bucket array of __ht's size; keep the old one around
	    // so it can be restored on exception.
	    __former_buckets = _M_buckets;
	    _M_buckets = _M_allocate_buckets(__ht._M_bucket_count);
	    _M_bucket_count = __ht._M_bucket_count;
	  }
	else
	  std::fill_n(_M_buckets, _M_bucket_count, nullptr);

	__try
	  {
	    __hashtable_base::operator=(std::forward<_Ht>(__ht));
	    _M_element_count = __ht._M_element_count;
	    _M_rehash_policy = __ht._M_rehash_policy;
	    // __roan recycles this table's existing nodes before allocating.
	    __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
	    _M_before_begin._M_nxt = nullptr;
	    _M_assign(std::forward<_Ht>(__ht), __roan);
	    if (__former_buckets)
	      _M_deallocate_buckets(__former_buckets, __former_bucket_count);
	    __rehash_guard._M_guarded_obj = nullptr;
	  }
	__catch(...)
	  {
	    if (__former_buckets)
	      {
		// Restore previous buckets.
		_M_deallocate_buckets();
		_M_buckets = __former_buckets;
		_M_bucket_count = __former_bucket_count;
	      }
	    std::fill_n(_M_buckets, _M_bucket_count, nullptr);
	    __throw_exception_again;
	  }
      }
1582
  // Copy (or move, when _Ht is an rvalue) all nodes of __ht into *this
  // using __node_gen to create each node.  On exception the guard clears
  // *this and frees any bucket array allocated here.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Ht, typename _NodeGenerator>
      void
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_assign(_Ht&& __ht, _NodeGenerator& __node_gen)
      {
	struct _Guard
	{
	  ~_Guard()
	  {
	    if (_M_ht)
	      {
		_M_ht->clear();
		if (_M_dealloc_buckets)
		  _M_ht->_M_deallocate_buckets();
	      }
	  }
	  _Hashtable* _M_ht = nullptr;
	  bool _M_dealloc_buckets = false;
	};
	_Guard __guard;

	if (!_M_buckets)
	  {
	    _M_buckets = _M_allocate_buckets(_M_bucket_count);
	    __guard._M_dealloc_buckets = true;
	  }

	// Nothing to copy when the source is empty.
	if (!__ht._M_before_begin._M_nxt)
	  return;

	__guard._M_ht = this;

	// Copy from lvalue sources, move from rvalue sources.
	using _FromVal = __conditional_t<is_lvalue_reference<_Ht>::value,
					 const value_type&, value_type&&>;

	// First deal with the special first node pointed to by
	// _M_before_begin.
	__node_ptr __ht_n = __ht._M_begin();
	__node_ptr __this_n
	  = __node_gen(static_cast<_FromVal>(__ht_n->_M_v()));
	_M_copy_code(*__this_n, *__ht_n);
	_M_update_bbegin(__this_n);

	// Then deal with other nodes.
	__node_ptr __prev_n = __this_n;
	for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
	  {
	    __this_n = __node_gen(static_cast<_FromVal>(__ht_n->_M_v()));
	    __prev_n->_M_nxt = __this_n;
	    _M_copy_code(*__this_n, *__ht_n);
	    size_type __bkt = _M_bucket_index(*__this_n);
	    if (!_M_buckets[__bkt])
	      // First node seen for this bucket: its before-begin node is
	      // the previous node in the list.
	      _M_buckets[__bkt] = __prev_n;
	    __prev_n = __this_n;
	  }
	// Success: disarm the guard.
	__guard._M_ht = nullptr;
      }
1645
1646 template<typename _Key, typename _Value, typename _Alloc,
1647 typename _ExtractKey, typename _Equal,
1648 typename _Hash, typename _RangeHash, typename _Unused,
1649 typename _RehashPolicy, typename _Traits>
1650 void
1651 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1652 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1653 _M_reset() noexcept
1654 {
1655 _M_rehash_policy._M_reset();
1656 _M_bucket_count = 1;
1657 _M_single_bucket = nullptr;
1658 _M_buckets = &_M_single_bucket;
1659 _M_before_begin._M_nxt = nullptr;
1660 _M_element_count = 0;
1661 }
1662
  // Move assignment when memory can be stolen (propagating or always-equal
  // allocator): discard our contents, take __ht's buckets and nodes, and
  // leave __ht empty.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_move_assign(_Hashtable&& __ht, true_type)
    {
      if (__builtin_expect(std::__addressof(__ht) == this, false))
	return;

      this->_M_deallocate_nodes(_M_begin());
      _M_deallocate_buckets();
      __hashtable_base::operator=(std::move(__ht));
      _M_rehash_policy = __ht._M_rehash_policy;
      // The single-bucket case can't share the array: it lives inside __ht.
      if (!__ht._M_uses_single_bucket())
	_M_buckets = __ht._M_buckets;
      else
	{
	  _M_buckets = &_M_single_bucket;
	  _M_single_bucket = __ht._M_single_bucket;
	}

      _M_bucket_count = __ht._M_bucket_count;
      _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
      _M_element_count = __ht._M_element_count;
      std::__alloc_on_move(this->_M_node_allocator(), __ht._M_node_allocator());

      // Fix bucket containing the _M_before_begin pointer that can't be moved.
      _M_update_bbegin();
      __ht._M_reset();
    }
1696
1697 template<typename _Key, typename _Value, typename _Alloc,
1698 typename _ExtractKey, typename _Equal,
1699 typename _Hash, typename _RangeHash, typename _Unused,
1700 typename _RehashPolicy, typename _Traits>
1701 void
1702 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1703 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1704 _M_move_assign(_Hashtable&& __ht, false_type)
1705 {
1706 if (__ht._M_node_allocator() == this->_M_node_allocator())
1707 _M_move_assign(std::move(__ht), true_type{});
1708 else
1709 {
1710 // Can't move memory, move elements then.
1711 _M_assign_elements(std::move(__ht));
1712 __ht.clear();
1713 }
1714 }
1715
  // Copy constructor: copy functors and policy, select the allocator per
  // allocator_traits, then deep-copy the elements with _M_assign.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    inline
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(const _Hashtable& __ht)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(
	__node_alloc_traits::_S_select_on_copy(__ht._M_node_allocator())),
      __enable_default_ctor(__ht),
      _M_buckets(nullptr),
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      // _M_buckets is null here, so _M_assign allocates the bucket array.
      _M_assign(__ht);
    }
1737
  // Move constructor with an always-equal allocator: steal __ht's buckets
  // and nodes wholesale, then reset __ht to the empty state.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
	       true_type /* alloc always equal */)
    noexcept(_S_nothrow_move())
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(std::move(__a)),
      __enable_default_ctor(__ht),
      _M_buckets(__ht._M_buckets),
      _M_bucket_count(__ht._M_bucket_count),
      _M_before_begin(__ht._M_before_begin._M_nxt),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      // Update buckets if __ht is using its single bucket.
      if (__ht._M_uses_single_bucket())
	{
	  _M_buckets = &_M_single_bucket;
	  _M_single_bucket = __ht._M_single_bucket;
	}

      // Fix bucket containing the _M_before_begin pointer that can't be moved.
      _M_update_bbegin();

      __ht._M_reset();
    }
1770
  // Extended copy constructor taking an explicit allocator; deep-copies
  // the elements with _M_assign.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    inline
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(const _Hashtable& __ht, const allocator_type& __a)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(__node_alloc_type(__a)),
      __enable_default_ctor(__ht),
      _M_buckets(),
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      // _M_buckets is null here, so _M_assign allocates the bucket array.
      _M_assign(__ht);
    }
1791
  // Extended move constructor when allocators may compare unequal: steal
  // memory only when they actually compare equal, otherwise move (or, if
  // moving could throw, copy) element by element.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
	       false_type /* alloc always equal */)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(std::move(__a)),
      __enable_default_ctor(__ht),
      _M_buckets(nullptr),
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      if (__ht._M_node_allocator() == this->_M_node_allocator())
	{
	  if (__ht._M_uses_single_bucket())
	    {
	      _M_buckets = &_M_single_bucket;
	      _M_single_bucket = __ht._M_single_bucket;
	    }
	  else
	    _M_buckets = __ht._M_buckets;

	  // Fix bucket containing the _M_before_begin pointer that can't be
	  // moved.
	  _M_update_bbegin(__ht._M_begin());

	  __ht._M_reset();
	}
      else
	{
	  // Move elements if value_type's move ctor is noexcept (or it has
	  // no copy ctor), otherwise copy them for the strong guarantee.
	  using _Fwd_Ht = __conditional_t<
	    __move_if_noexcept_cond<value_type>::value,
	    const _Hashtable&, _Hashtable&&>;
	  _M_assign(std::forward<_Fwd_Ht>(__ht));
	  __ht.clear();
	}
    }
1835
  // Destructor: free all nodes, then the bucket array.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    ~_Hashtable() noexcept
    {
      // Getting a bucket index from a node shall not throw because it is used
      // during the rehash process. This static_assert purpose is limited to usage
      // of _Hashtable with _Hashtable_traits requesting non-cached hash code.
      // Need a complete type to check this, so do it in the destructor not at
      // class scope.
      static_assert(noexcept(declval<const __hash_code_base_access&>()
			     ._M_bucket_index(declval<const __node_value_type&>(),
					      (std::size_t)0)),
		    "Cache the hash code or qualify your functors involved"
		    " in hash code and bucket index computation with noexcept");

      this->_M_deallocate_nodes(_M_begin());
      _M_deallocate_buckets();
    }
1858
  // Swap all state with __x, taking care of the inline single bucket
  // (whose address differs per object) and the _M_before_begin node.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    swap(_Hashtable& __x)
    noexcept(__and_<__is_nothrow_swappable<_Hash>,
		    __is_nothrow_swappable<_Equal>>::value)
    {
      using std::swap;
      swap(__hash_code_base::_M_hash._M_obj,
	   __x.__hash_code_base::_M_hash._M_obj);
      swap(__hashtable_base::_M_equal._M_obj,
	   __x.__hashtable_base::_M_equal._M_obj);

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
      if constexpr (__node_alloc_traits::propagate_on_container_swap::value)
	swap(this->_M_node_allocator(), __x._M_node_allocator());
#pragma GCC diagnostic pop

      std::swap(_M_rehash_policy, __x._M_rehash_policy);

      // Deal properly with potentially moved instances.
      // A bucket pointer equal to &_M_single_bucket can't be exchanged
      // directly: it must keep pointing into its own object.
      if (this->_M_uses_single_bucket())
	{
	  if (!__x._M_uses_single_bucket())
	    {
	      _M_buckets = __x._M_buckets;
	      __x._M_buckets = &__x._M_single_bucket;
	    }
	}
      else if (__x._M_uses_single_bucket())
	{
	  __x._M_buckets = _M_buckets;
	  _M_buckets = &_M_single_bucket;
	}
      else
	std::swap(_M_buckets, __x._M_buckets);

      std::swap(_M_bucket_count, __x._M_bucket_count);
      std::swap(_M_before_begin._M_nxt, __x._M_before_begin._M_nxt);
      std::swap(_M_element_count, __x._M_element_count);
      std::swap(_M_single_bucket, __x._M_single_bucket);

      // Fix buckets containing the _M_before_begin pointers that can't be
      // swapped.
      _M_update_bbegin();
      __x._M_update_bbegin();
    }
1911
1912 template<typename _Key, typename _Value, typename _Alloc,
1913 typename _ExtractKey, typename _Equal,
1914 typename _Hash, typename _RangeHash, typename _Unused,
1915 typename _RehashPolicy, typename _Traits>
1916 auto
1917 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1918 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1919 find(const key_type& __k)
1920 -> iterator
1921 { return iterator(_M_locate(__k)); }
1922
1923 template<typename _Key, typename _Value, typename _Alloc,
1924 typename _ExtractKey, typename _Equal,
1925 typename _Hash, typename _RangeHash, typename _Unused,
1926 typename _RehashPolicy, typename _Traits>
1927 auto
1928 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1929 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1930 find(const key_type& __k) const
1931 -> const_iterator
1932 { return const_iterator(_M_locate(__k)); }
1933
1934#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED
  // Heterogeneous lookup (C++20): find using a key-like value __k without
  // converting it to key_type.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_find_tr(const _Kt& __k)
      -> iterator
      {
        // For small tables a linear scan of all elements is cheaper than
        // computing the hash of __k.
        if (size() <= __small_size_threshold())
          {
            for (auto __n = _M_begin(); __n; __n = __n->_M_next())
              if (this->_M_key_equals_tr(__k, *__n))
                return iterator(__n);
            return end();
          }

        __hash_code __code = this->_M_hash_code_tr(__k);
        std::size_t __bkt = _M_bucket_index(__code);
        return iterator(_M_find_node_tr(__bkt, __k, __code));
      }
1958
  // Const overload of the heterogeneous lookup above.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_find_tr(const _Kt& __k) const
      -> const_iterator
      {
        // Small table: linear scan avoids hashing __k at all.
        if (size() <= __small_size_threshold())
          {
            for (auto __n = _M_begin(); __n; __n = __n->_M_next())
              if (this->_M_key_equals_tr(__k, *__n))
                return const_iterator(__n);
            return end();
          }

        __hash_code __code = this->_M_hash_code_tr(__k);
        std::size_t __bkt = _M_bucket_index(__code);
        return const_iterator(_M_find_node_tr(__bkt, __k, __code));
      }
1982#endif // C++20 __glibcxx_generic_unordered_lookup
1983
  // Return the number of elements with key equivalent to __k.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    count(const key_type& __k) const
    -> size_type
    {
      auto __it = find(__k);
      if (!__it._M_cur)
        return 0;

      // With unique keys there is at most one match.
      if (__unique_keys::value)
        return 1;

      // Equivalent elements are linked consecutively: count forward from
      // the first match until a non-equivalent node or the end of the list.
      size_type __result = 1;
      for (auto __ref = __it++;
           __it._M_cur && this->_M_node_equals(*__ref._M_cur, *__it._M_cur);
           ++__it)
        ++__result;

      return __result;
    }
2009
2010#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED
  // Heterogeneous count (C++20): number of elements equivalent to __k.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_count_tr(const _Kt& __k) const
      -> size_type
      {
        // Small table: scan the whole element list, counting the run of
        // matches.
        if (size() <= __small_size_threshold())
          {
            size_type __result = 0;
            for (auto __n = _M_begin(); __n; __n = __n->_M_next())
              {
                if (this->_M_key_equals_tr(__k, *__n))
                  {
                    ++__result;
                    continue;
                  }

                // Equivalent elements are stored contiguously, so once a
                // match has been seen the first mismatch ends the run.
                if (__result)
                  break;
              }

            return __result;
          }

        __hash_code __code = this->_M_hash_code_tr(__k);
        std::size_t __bkt = _M_bucket_index(__code);
        auto __n = _M_find_node_tr(__bkt, __k, __code);
        if (!__n)
          return 0;

        // Count the consecutive equivalent nodes after the first match.
        iterator __it(__n);
        size_type __result = 1;
        for (++__it;
             __it._M_cur && this->_M_equals_tr(__k, __code, *__it._M_cur);
             ++__it)
          ++__result;

        return __result;
      }
2055#endif // C++20 __glibcxx_generic_unordered_lookup
2056
  // Return the range of elements with key equivalent to __k as an
  // iterator pair [first, last).
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    equal_range(const key_type& __k)
    -> pair<iterator, iterator>
    {
      auto __ite = find(__k);
      if (!__ite._M_cur)
        return { __ite, __ite };    // Not found: empty range at end.

      auto __beg = __ite++;
      // With unique keys the range has exactly one element.
      if (__unique_keys::value)
        return { __beg, __ite };

      // Equivalent elements are adjacent: advance past the run.
      while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
        ++__ite;

      return { __beg, __ite };
    }
2080
  // Const overload of equal_range.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    equal_range(const key_type& __k) const
    -> pair<const_iterator, const_iterator>
    {
      auto __ite = find(__k);
      if (!__ite._M_cur)
        return { __ite, __ite };    // Not found: empty range at end.

      auto __beg = __ite++;
      // With unique keys the range has exactly one element.
      if (__unique_keys::value)
        return { __beg, __ite };

      // Equivalent elements are adjacent: advance past the run.
      while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
        ++__ite;

      return { __beg, __ite };
    }
2104
2105#ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED
  // Heterogeneous equal_range (C++20).
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_equal_range_tr(const _Kt& __k)
      -> pair<iterator, iterator>
      {
        // Small table: scan the list for the contiguous run of matches.
        if (size() <= __small_size_threshold())
          {
            __node_ptr __n, __beg = nullptr;
            for (__n = _M_begin(); __n; __n = __n->_M_next())
              {
                if (this->_M_key_equals_tr(__k, *__n))
                  {
                    if (!__beg)
                      __beg = __n;  // First match: start of the range.
                    continue;
                  }

                if (__beg)
                  break;            // First mismatch after a match ends it.
              }

            // __n is null (no match / range runs to the end) or one past
            // the run of equivalent elements.
            return { iterator(__beg), iterator(__n) };
          }

        __hash_code __code = this->_M_hash_code_tr(__k);
        std::size_t __bkt = _M_bucket_index(__code);
        auto __n = _M_find_node_tr(__bkt, __k, __code);
        iterator __ite(__n);
        if (!__n)
          return { __ite, __ite };

        // Advance past the consecutive equivalent nodes.
        auto __beg = __ite++;
        while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
          ++__ite;

        return { __beg, __ite };
      }
2149
  // Const overload of the heterogeneous equal_range above.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_equal_range_tr(const _Kt& __k) const
      -> pair<const_iterator, const_iterator>
      {
        // Small table: scan the list for the contiguous run of matches.
        if (size() <= __small_size_threshold())
          {
            __node_ptr __n, __beg = nullptr;
            for (__n = _M_begin(); __n; __n = __n->_M_next())
              {
                if (this->_M_key_equals_tr(__k, *__n))
                  {
                    if (!__beg)
                      __beg = __n;  // First match: start of the range.
                    continue;
                  }

                if (__beg)
                  break;            // First mismatch after a match ends it.
              }

            return { const_iterator(__beg), const_iterator(__n) };
          }

        __hash_code __code = this->_M_hash_code_tr(__k);
        std::size_t __bkt = _M_bucket_index(__code);
        auto __n = _M_find_node_tr(__bkt, __k, __code);
        const_iterator __ite(__n);
        if (!__n)
          return { __ite, __ite };

        // Advance past the consecutive equivalent nodes.
        auto __beg = __ite++;
        while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
          ++__ite;

        return { __beg, __ite };
      }
2193#endif // C++20 __glibcxx_generic_unordered_lookup
2194
  // Find the node before the one whose key compares equal to k in the bucket
  // bkt.  Return nullptr if no node is found.  Returning the *predecessor*
  // lets callers unlink or insert without a second traversal.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_find_before_node(size_type __bkt, const key_type& __k,
                        __hash_code __code) const
    -> __node_base_ptr
    {
      __node_base_ptr __prev_p = _M_buckets[__bkt];
      if (!__prev_p)
        return nullptr;             // Empty bucket.

      // A bucket entry points at the node *before* the bucket's first node,
      // so the scan starts at its successor.
      for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
           __p = __p->_M_next())
        {
          if (this->_M_equals(__k, __code, *__p))
            return __prev_p;

          // Stop when the list ends or the next node is in another bucket.
          if (__builtin_expect (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt, 0))
            break;
          __prev_p = __p;
        }

      return nullptr;
    }
2225
  // Heterogeneous variant of _M_find_before_node: find the node before the
  // one equivalent to __k in bucket __bkt, or nullptr.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    template<typename _Kt>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_find_before_node_tr(size_type __bkt, const _Kt& __k,
                             __hash_code __code) const
      -> __node_base_ptr
      {
        __node_base_ptr __prev_p = _M_buckets[__bkt];
        if (!__prev_p)
          return nullptr;           // Empty bucket.

        // The bucket entry is the node *before* the bucket's first node.
        for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
             __p = __p->_M_next())
          {
            if (this->_M_equals_tr(__k, __code, *__p))
              return __prev_p;

            // Stop when the list ends or the next node changes bucket.
            if (__builtin_expect (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt, 0))
              break;
            __prev_p = __p;
          }

        return nullptr;
      }
2255
  // Locate the node equivalent to __k.  The returned __location_type holds
  // the predecessor of the matching node when found (null _M_before
  // otherwise) and, whenever the hash was computed, the hash code and
  // bucket index so callers can reuse them for a subsequent insertion.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    inline auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_locate(const key_type& __k) const
    -> __location_type
    {
      __location_type __loc;
      const auto __size = size();

      if (__size <= __small_size_threshold())
        {
          // Small table: linear scan of the whole element list, tracking
          // the predecessor as we go; no hashing needed on a hit.
          __loc._M_before = pointer_traits<__node_base_ptr>::
            pointer_to(const_cast<__node_base&>(_M_before_begin));
          while (__loc._M_before->_M_nxt)
            {
              if (this->_M_key_equals(__k, *__loc._M_node()))
                return __loc;       // Found: hash code left uncomputed.
              __loc._M_before = __loc._M_before->_M_nxt;
            }
          __loc._M_before = nullptr; // Didn't find it.
        }

      // Compute the hash even on the not-found path so callers can use it
      // to insert without rehashing the key.
      __loc._M_hash_code = this->_M_hash_code(__k);
      __loc._M_bucket_index = _M_bucket_index(__loc._M_hash_code);

      // Large table: do a proper bucket search.
      if (__size > __small_size_threshold())
        __loc._M_before = _M_find_before_node(__loc._M_bucket_index, __k,
                                              __loc._M_hash_code);

      return __loc;
    }
2291
2292 template<typename _Key, typename _Value, typename _Alloc,
2293 typename _ExtractKey, typename _Equal,
2294 typename _Hash, typename _RangeHash, typename _Unused,
2295 typename _RehashPolicy, typename _Traits>
2296 auto
2297 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2298 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2299 _M_get_previous_node(size_type __bkt, __node_ptr __n)
2300 -> __node_base_ptr
2301 {
2302 __node_base_ptr __prev_n = _M_buckets[__bkt];
2303 while (__prev_n->_M_nxt != __n)
2304 __prev_n = __prev_n->_M_nxt;
2305 return __prev_n;
2306 }
2307
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
  // Emplace into a unique-keys table.  When the key can be extracted from
  // the arguments directly, the duplicate check runs *before* any node is
  // allocated, so inserting an existing key allocates nothing.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    template<typename... _Args>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_emplace_uniq(_Args&&... __args)
      -> pair<iterator, bool>
      {
        const key_type* __kp = nullptr;

        if constexpr (sizeof...(_Args) == 1)
          {
            // A single argument that already is (or contains) the key.
            if constexpr (__is_key_type<_Args...>)
              {
                const auto& __key = _ExtractKey{}(__args...);
                __kp = std::__addressof(__key);
              }
          }
        else if constexpr (sizeof...(_Args) == 2)
          {
            // Two arguments forming a (key, mapped) pair: bind references
            // so the key can be examined without building a value_type.
            if constexpr (__is_key_type<pair<const _Args&...>>)
              {
                pair<const _Args&...> __refs(__args...);
                const auto& __key = _ExtractKey{}(__refs);
                __kp = std::__addressof(__key);
              }
          }

        _Scoped_node __node { __node_ptr(), this }; // Do not create node yet.
        __hash_code __code = 0;
        size_type __bkt = 0;

        if (__kp == nullptr)
          {
            // Didn't extract a key from the args, so build the node.
            __node._M_node
              = this->_M_allocate_node(std::forward<_Args>(__args)...);
            const key_type& __key = _ExtractKey{}(__node._M_node->_M_v());
            __kp = std::__addressof(__key);
          }

        if (auto __loc = _M_locate(*__kp))
          // There is already an equivalent node, no insertion.
          // (_Scoped_node frees the speculatively built node, if any.)
          return { iterator(__loc), false };
        else
          {
            // Reuse the hash code and bucket computed by _M_locate.
            __code = __loc._M_hash_code;
            __bkt = __loc._M_bucket_index;
          }

        if (!__node._M_node)
          __node._M_node
            = this->_M_allocate_node(std::forward<_Args>(__args)...);

        // Insert the node; on success release _Scoped_node's ownership.
        auto __pos = _M_insert_unique_node(__bkt, __code, __node._M_node);
        __node._M_node = nullptr;
        return { __pos, true };
      }
#pragma GCC diagnostic pop
2373
  // Emplace into a non-unique-keys table, using __hint as insertion hint.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    template<typename... _Args>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_emplace_multi(const_iterator __hint, _Args&&... __args)
      -> iterator
      {
        // First build the node to get its hash code.
        // _Scoped_node deallocates it if anything below throws.
        _Scoped_node __node { this, std::forward<_Args>(__args)... };
        const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());

        auto __res = this->_M_compute_hash_code(__hint._M_cur, __k);
        auto __pos
          = _M_insert_multi_node(__res.first, __res.second, __node._M_node);
        __node._M_node = nullptr;   // Ownership transferred to the table.
        return __pos;
      }
2395
  // Prepare for the insertion of __n more elements: rehash now if the
  // policy says the current bucket count would be insufficient.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_rehash_insert(size_type __n)
    {
      using __pair_type = std::pair<bool, std::size_t>;
      if (__n == 0)
        return;

      // The guard restores the policy's previous state if _M_rehash throws.
      __rehash_guard_t __rehash_guard(_M_rehash_policy);
      __pair_type __do_rehash
        = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, __n);

      if (__do_rehash.first)
        _M_rehash(__do_rehash.second, false_type{});

      __rehash_guard._M_guarded_obj = nullptr; // Commit: disarm the guard.
    }
2418
2419
2420 template<typename _Key, typename _Value, typename _Alloc,
2421 typename _ExtractKey, typename _Equal,
2422 typename _Hash, typename _RangeHash, typename _Unused,
2423 typename _RehashPolicy, typename _Traits>
2424 template<typename _InputIterator>
2425 void
2426 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2427 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2428 _M_insert_range_multi(_InputIterator __first, _InputIterator __last)
2429 {
2430 _M_rehash_insert(__detail::__distance_fw(__first, __last));
2431 for (; __first != __last; ++__first)
2432 _M_emplace_multi(cend(), *__first);
2433 }
2434
  // Return a (hint, hash code) pair for key __k.  For small tables, if an
  // equivalent element already exists its cached hash code is reused
  // instead of hashing __k, and that node becomes the insertion hint.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_compute_hash_code(__node_ptr __hint, const key_type& __k) const
    -> pair<__node_ptr, __hash_code>
    {
      if (size() <= __small_size_threshold())
        {
          if (__hint)
            {
              // Search from the hint to the end of the list first...
              for (auto __it = __hint; __it; __it = __it->_M_next())
                if (this->_M_key_equals(__k, *__it))
                  return { __it, this->_M_hash_code(*__it) };
            }

          // ...then from the beginning up to the hint.
          for (auto __it = _M_begin(); __it != __hint; __it = __it->_M_next())
            if (this->_M_key_equals(__k, *__it))
              return { __it, this->_M_hash_code(*__it) };

          // No equivalent element: the hint is useless, drop it.
          __hint = nullptr;
        }

      return { __hint, this->_M_hash_code(__k) };
    }
2463
2464 template<typename _Key, typename _Value, typename _Alloc,
2465 typename _ExtractKey, typename _Equal,
2466 typename _Hash, typename _RangeHash, typename _Unused,
2467 typename _RehashPolicy, typename _Traits>
2468 auto
2469 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2470 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2471 _M_insert_unique_node(size_type __bkt, __hash_code __code,
2472 __node_ptr __node, size_type __n_elt)
2473 -> iterator
2474 {
2475 __rehash_guard_t __rehash_guard(_M_rehash_policy);
2477 = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count,
2478 __n_elt);
2479
2480 if (__do_rehash.first)
2481 {
2482 _M_rehash(__do_rehash.second, true_type{});
2483 __bkt = _M_bucket_index(__code);
2484 }
2485
2486 __rehash_guard._M_guarded_obj = nullptr;
2487 _M_store_code(*__node, __code);
2488
2489 // Always insert at the beginning of the bucket.
2490 _M_insert_bucket_begin(__bkt, __node);
2491 ++_M_element_count;
2492 return iterator(__node);
2493 }
2494
2495 template<typename _Key, typename _Value, typename _Alloc,
2496 typename _ExtractKey, typename _Equal,
2497 typename _Hash, typename _RangeHash, typename _Unused,
2498 typename _RehashPolicy, typename _Traits>
2499 auto
2500 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2501 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2502 _M_insert_multi_node(__node_ptr __hint,
2503 __hash_code __code, __node_ptr __node)
2504 -> iterator
2505 {
2506 __rehash_guard_t __rehash_guard(_M_rehash_policy);
2508 = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1);
2509
2510 if (__do_rehash.first)
2511 _M_rehash(__do_rehash.second, false_type{});
2512
2513 __rehash_guard._M_guarded_obj = nullptr;
2514 _M_store_code(*__node, __code);
2515 const key_type& __k = _ExtractKey{}(__node->_M_v());
2516 size_type __bkt = _M_bucket_index(__code);
2517
2518 // Find the node before an equivalent one or use hint if it exists and
2519 // if it is equivalent.
2520 __node_base_ptr __prev
2521 = __builtin_expect(__hint != nullptr, false)
2522 && this->_M_equals(__k, __code, *__hint)
2523 ? __hint
2524 : _M_find_before_node(__bkt, __k, __code);
2525
2526 if (__prev)
2527 {
2528 // Insert after the node before the equivalent one.
2529 __node->_M_nxt = __prev->_M_nxt;
2530 __prev->_M_nxt = __node;
2531 if (__builtin_expect(__prev == __hint, false))
2532 // hint might be the last bucket node, in this case we need to
2533 // update next bucket.
2534 if (__node->_M_nxt
2535 && !this->_M_equals(__k, __code, *__node->_M_next()))
2536 {
2537 size_type __next_bkt = _M_bucket_index(*__node->_M_next());
2538 if (__next_bkt != __bkt)
2539 _M_buckets[__next_bkt] = __node;
2540 }
2541 }
2542 else
2543 // The inserted node has no equivalent in the hashtable. We must
2544 // insert the new node at the beginning of the bucket to preserve
2545 // equivalent elements' relative positions.
2546 _M_insert_bucket_begin(__bkt, __node);
2547 ++_M_element_count;
2548 return iterator(__node);
2549 }
2550
2551 template<typename _Key, typename _Value, typename _Alloc,
2552 typename _ExtractKey, typename _Equal,
2553 typename _Hash, typename _RangeHash, typename _Unused,
2554 typename _RehashPolicy, typename _Traits>
2555 auto
2556 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2557 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2558 erase(const_iterator __it)
2559 -> iterator
2560 {
2561 __node_ptr __n = __it._M_cur;
2562 std::size_t __bkt = _M_bucket_index(*__n);
2563
2564 // Look for previous node to unlink it from the erased one, this
2565 // is why we need buckets to contain the before begin to make
2566 // this search fast.
2567 __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
2568 return _M_erase(__bkt, __prev_n, __n);
2569 }
2570
  // Unlink and destroy node __n, which lives in bucket __bkt and is
  // preceded by __prev_n.  Returns an iterator to the following node.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
    -> iterator
    {
      if (__prev_n == _M_buckets[__bkt])
        // __n is the bucket's first node: the bucket entry (and possibly
        // the next bucket's entry) must be updated.
        _M_remove_bucket_begin(__bkt, __n->_M_next(),
          __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
      else if (__n->_M_nxt)
        {
          // If __n is the last node of its bucket, the next bucket's
          // before-begin pointer must become __prev_n.
          size_type __next_bkt = _M_bucket_index(*__n->_M_next());
          if (__next_bkt != __bkt)
            _M_buckets[__next_bkt] = __prev_n;
        }

      __prev_n->_M_nxt = __n->_M_nxt;
      iterator __result(__n->_M_next());
      this->_M_deallocate_node(__n);
      --_M_element_count;

      return __result;
    }
2598
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
  // Erase all elements with key equivalent to __k; return how many were
  // removed.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    erase(const key_type& __k)
    -> size_type
    {
      auto __loc = _M_locate(__k);
      if (!__loc)
        return 0;

      __node_base_ptr __prev_n = __loc._M_before;
      __node_ptr __n = __loc._M_node();
      auto __bkt = __loc._M_bucket_index;
      // _M_locate's small-size path can return without computing the
      // bucket; recompute it from the node in that case.
      if (__bkt == size_type(-1))
        __bkt = _M_bucket_index(*__n);

      if constexpr (__unique_keys::value)
        {
          _M_erase(__bkt, __prev_n, __n);
          return 1;
        }
      else
        {
          // _GLIBCXX_RESOLVE_LIB_DEFECTS
          // 526. Is it undefined if a function in the standard changes
          // in parameters?
          // We use one loop to find all matching nodes and another to
          // deallocate them so that the key stays valid during the first loop.
          // It might be invalidated indirectly when destroying nodes.
          __node_ptr __n_last = __n->_M_next();
          while (__n_last && this->_M_node_equals(*__n, *__n_last))
            __n_last = __n_last->_M_next();

          std::size_t __n_last_bkt
            = __n_last ? _M_bucket_index(*__n_last) : __bkt;

          // Deallocate nodes.
          size_type __result = 0;
          do
            {
              __node_ptr __p = __n->_M_next();
              this->_M_deallocate_node(__n);
              __n = __p;
              ++__result;
            }
          while (__n != __n_last);

          // Relink the list and fix the bucket entries around the gap.
          _M_element_count -= __result;
          if (__prev_n == _M_buckets[__bkt])
            _M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
          else if (__n_last_bkt != __bkt)
            _M_buckets[__n_last_bkt] = __prev_n;
          __prev_n->_M_nxt = __n_last;
          return __result;
        }
    }
#pragma GCC diagnostic pop
2662
  // Erase the elements in the valid range [__first, __last).  Nodes are
  // released bucket by bucket so each bucket's before-begin entry can be
  // fixed as soon as the bucket is emptied.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    erase(const_iterator __first, const_iterator __last)
    -> iterator
    {
      __node_ptr __n = __first._M_cur;
      __node_ptr __last_n = __last._M_cur;
      if (__n == __last_n)
        return iterator(__n);       // Empty range: nothing to do.

      std::size_t __bkt = _M_bucket_index(*__n);

      __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
      bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
      std::size_t __n_bkt = __bkt;
      for (;;)
        {
          // Deallocate the run of nodes belonging to the current bucket.
          do
            {
              __node_ptr __tmp = __n;
              __n = __n->_M_next();
              this->_M_deallocate_node(__tmp);
              --_M_element_count;
              if (!__n)
                break;
              __n_bkt = _M_bucket_index(*__n);
            }
          while (__n != __last_n && __n_bkt == __bkt);
          if (__is_bucket_begin)
            _M_remove_bucket_begin(__bkt, __n, __n_bkt);
          if (__n == __last_n)
            break;
          // Every subsequent bucket is erased from its first node onwards.
          __is_bucket_begin = true;
          __bkt = __n_bkt;
        }

      // Relink the list across the erased span and fix the bucket that now
      // begins at __prev_n.
      if (__n && (__n_bkt != __bkt || __is_bucket_begin))
        _M_buckets[__n_bkt] = __prev_n;
      __prev_n->_M_nxt = __n;
      return iterator(__n);
    }
2709
2710 template<typename _Key, typename _Value, typename _Alloc,
2711 typename _ExtractKey, typename _Equal,
2712 typename _Hash, typename _RangeHash, typename _Unused,
2713 typename _RehashPolicy, typename _Traits>
2714 void
2715 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2716 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2717 clear() noexcept
2718 {
2719 this->_M_deallocate_nodes(_M_begin());
2720 std::fill_n(_M_buckets, _M_bucket_count, nullptr);
2721 _M_element_count = 0;
2722 _M_before_begin._M_nxt = nullptr;
2723 }
2724
  // Set the bucket count to at least __bkt_count, or more if required by
  // the rehash policy for one more than the current element count.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    rehash(size_type __bkt_count)
    {
      // The guard restores the policy's previous state if _M_rehash throws.
      __rehash_guard_t __rehash_guard(_M_rehash_policy);
      // Never shrink below what the policy needs for the current size.
      __bkt_count
        = std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
                   __bkt_count);
      __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);

      if (__bkt_count != _M_bucket_count)
        {
          _M_rehash(__bkt_count, __unique_keys{});
          __rehash_guard._M_guarded_obj = nullptr; // Commit: disarm guard.
        }
    }
2746
  // Rehash when there is no equivalent elements: each node can be relinked
  // into its new bucket independently, no ordering constraints.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_rehash(size_type __bkt_count, true_type /* __uks */)
    {
      __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
      __node_ptr __p = _M_begin();
      _M_before_begin._M_nxt = nullptr;
      std::size_t __bbegin_bkt = 0; // Bucket currently holding the list head.
      while (__p)
        {
          __node_ptr __next = __p->_M_next();
          std::size_t __bkt
            = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
          if (!__new_buckets[__bkt])
            {
              // First node of this bucket: push it at the head of the
              // global list and make the bucket point at before-begin.
              __p->_M_nxt = _M_before_begin._M_nxt;
              _M_before_begin._M_nxt = __p;
              __new_buckets[__bkt] = &_M_before_begin;
              // The previous head's bucket now starts after __p.
              if (__p->_M_nxt)
                __new_buckets[__bbegin_bkt] = __p;
              __bbegin_bkt = __bkt;
            }
          else
            {
              // Bucket already populated: link right after its entry node.
              __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
              __new_buckets[__bkt]->_M_nxt = __p;
            }

          __p = __next;
        }

      _M_deallocate_buckets();
      _M_bucket_count = __bkt_count;
      _M_buckets = __new_buckets;
    }
2788
  // Rehash when there can be equivalent elements, preserve their relative
  // order.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_rehash(size_type __bkt_count, false_type /* __uks */)
    {
      __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
      __node_ptr __p = _M_begin();
      _M_before_begin._M_nxt = nullptr;
      std::size_t __bbegin_bkt = 0;  // Bucket holding the list head.
      std::size_t __prev_bkt = 0;    // Bucket of the previously moved node.
      __node_ptr __prev_p = nullptr; // Previously moved node.
      bool __check_bucket = false;   // Pending last-node-of-bucket check.

      while (__p)
        {
          __node_ptr __next = __p->_M_next();
          std::size_t __bkt
            = __hash_code_base::_M_bucket_index(*__p, __bkt_count);

          if (__prev_p && __prev_bkt == __bkt)
            {
              // Previous insert was already in this bucket, we insert after
              // the previously inserted one to preserve equivalent elements
              // relative order.
              __p->_M_nxt = __prev_p->_M_nxt;
              __prev_p->_M_nxt = __p;

              // Inserting after a node in a bucket require to check that we
              // haven't change the bucket last node, in this case next
              // bucket containing its before begin node must be updated. We
              // schedule a check as soon as we move out of the sequence of
              // equivalent nodes to limit the number of checks.
              __check_bucket = true;
            }
          else
            {
              if (__check_bucket)
                {
                  // Check if we shall update the next bucket because of
                  // insertions into __prev_bkt bucket.
                  if (__prev_p->_M_nxt)
                    {
                      std::size_t __next_bkt
                        = __hash_code_base::_M_bucket_index(
                          *__prev_p->_M_next(), __bkt_count);
                      if (__next_bkt != __prev_bkt)
                        __new_buckets[__next_bkt] = __prev_p;
                    }
                  __check_bucket = false;
                }

              if (!__new_buckets[__bkt])
                {
                  // First node of its bucket: push at the head of the
                  // global list, bucket entry is the before-begin node.
                  __p->_M_nxt = _M_before_begin._M_nxt;
                  _M_before_begin._M_nxt = __p;
                  __new_buckets[__bkt] = &_M_before_begin;
                  if (__p->_M_nxt)
                    __new_buckets[__bbegin_bkt] = __p;
                  __bbegin_bkt = __bkt;
                }
              else
                {
                  // Bucket already populated: link after its entry node.
                  __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
                  __new_buckets[__bkt]->_M_nxt = __p;
                }
            }
          __prev_p = __p;
          __prev_bkt = __bkt;
          __p = __next;
        }

      // Perform any check still pending for the final run of nodes.
      if (__check_bucket && __prev_p->_M_nxt)
        {
          std::size_t __next_bkt
            = __hash_code_base::_M_bucket_index(*__prev_p->_M_next(),
                                                __bkt_count);
          if (__next_bkt != __prev_bkt)
            __new_buckets[__next_bkt] = __prev_p;
        }

      _M_deallocate_buckets();
      _M_bucket_count = __bkt_count;
      _M_buckets = __new_buckets;
    }
2879
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr

  // This is for implementing equality comparison for unordered containers,
  // per N3068, by John Lakos and Pablo Halpern.
  // Algorithmically, we follow closely the reference implementations therein.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    bool
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_equal(const _Hashtable& __other) const
    {
      if (size() != __other.size())
        return false;

      if constexpr (__unique_keys::value)
        // Unique keys: every element of *this must have an equal element
        // somewhere in __other's corresponding bucket.
        for (auto __x_n = _M_begin(); __x_n; __x_n = __x_n->_M_next())
          {
            std::size_t __ybkt = __other._M_bucket_index_ext(*__x_n);
            auto __prev_n = __other._M_buckets[__ybkt];
            if (!__prev_n)
              return false;

            // Scan __other's bucket for a value equal to __x_n's.
            for (__node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);;
                 __n = __n->_M_next())
              {
                if (__n->_M_v() == __x_n->_M_v())
                  break;

                if (!__n->_M_nxt
                    || __other._M_bucket_index(*__n->_M_next()) != __ybkt)
                  return false;
              }
          }
      else // non-unique keys
        // Compare one run of equivalent keys at a time: each run in *this
        // must be a permutation of the matching run in __other.
        for (auto __x_n = _M_begin(); __x_n;)
          {
            // Measure the run [__x_n, __x_n_end) of equivalent keys.
            std::size_t __x_count = 1;
            auto __x_n_end = __x_n->_M_next();
            for (; __x_n_end
                   && key_eq()(_ExtractKey{}(__x_n->_M_v()),
                               _ExtractKey{}(__x_n_end->_M_v()));
                 __x_n_end = __x_n_end->_M_next())
              ++__x_count;

            std::size_t __ybkt = __other._M_bucket_index_ext(*__x_n);
            auto __y_prev_n = __other._M_buckets[__ybkt];
            if (!__y_prev_n)
              return false;

            // Find the start of the equivalent run in __other's bucket.
            __node_ptr __y_n = static_cast<__node_ptr>(__y_prev_n->_M_nxt);
            for (;;)
              {
                if (key_eq()(_ExtractKey{}(__y_n->_M_v()),
                             _ExtractKey{}(__x_n->_M_v())))
                  break;

                // Skip over the current run of (other-)equivalent nodes.
                auto __y_ref_n = __y_n;
                for (__y_n = __y_n->_M_next(); __y_n; __y_n = __y_n->_M_next())
                  if (!__other._M_node_equals(*__y_ref_n, *__y_n))
                    break;

                if (!__y_n || __other._M_bucket_index(*__y_n) != __ybkt)
                  return false;
              }

            // __other's run must be at least as long as ours.
            auto __y_n_end = __y_n;
            for (; __y_n_end; __y_n_end = __y_n_end->_M_next())
              if (--__x_count == 0)
                break;

            if (__x_count != 0)
              return false;

            // Same length: compare the runs as multisets.
            const_iterator __itx(__x_n), __itx_end(__x_n_end);
            const_iterator __ity(__y_n);
            if (!std::is_permutation(__itx, __itx_end, __ity))
              return false;

            __x_n = __x_n_end;
          }

      return true;
    }
#pragma GCC diagnostic pop
2968
2969#ifdef __glibcxx_node_extract // >= C++17 && HOSTED
  // Primary template; specialized by each unordered container so that its
  // merge() members can access the _Hashtable of a compatible container.
  template<typename, typename, typename> class _Hash_merge_helper { };
2971#endif // C++17
2972
2973#if __cpp_deduction_guides >= 201606
  // Used to constrain deduction guides: SFINAEs away types that are
  // integral (a bucket count) or satisfy __is_allocator, so they are not
  // deduced as the hash functor.
  template<typename _Hash>
    using _RequireNotAllocatorOrIntegral
      = __enable_if_t<!__or_<is_integral<_Hash>, __is_allocator<_Hash>>::value>;
2978#endif
2979
2980/// @endcond
2981_GLIBCXX_END_NAMESPACE_VERSION
2982} // namespace std
2983
2984#pragma GCC diagnostic pop
2985
2986#endif // _HASHTABLE_H
__bool_constant< true > true_type
The type used as a compile-time boolean with true value.
Definition type_traits:117
pair(_T1, _T2) -> pair< _T1, _T2 >
Two pairs are equal iff their members are equal.
auto declval() noexcept -> decltype(__declval< _Tp >(0))
Definition type_traits:2671
constexpr tuple< _Elements &&... > forward_as_tuple(_Elements &&... __args) noexcept
Create a tuple of lvalue or rvalue references to the arguments.
Definition tuple:2680
constexpr std::remove_reference< _Tp >::type && move(_Tp &&__t) noexcept
Convert a value to an rvalue.
Definition move.h:138
constexpr piecewise_construct_t piecewise_construct
Tag for piecewise construction of std::pair objects.
Definition stl_pair.h:82
constexpr _Tp * __addressof(_Tp &__r) noexcept
Same as C++11 std::addressof.
Definition move.h:52
constexpr _Tp && forward(typename std::remove_reference< _Tp >::type &__t) noexcept
Forward an lvalue.
Definition move.h:72
_Tp * end(valarray< _Tp > &__va) noexcept
Return an iterator pointing to one past the last element of the valarray.
Definition valarray:1251
_Tp * begin(valarray< _Tp > &__va) noexcept
Return an iterator pointing to the first element of the valarray.
Definition valarray:1229
constexpr const _Tp & max(const _Tp &, const _Tp &)
This does what you think it does.
ISO C++ entities toplevel namespace is std.
constexpr iterator_traits< _InputIterator >::difference_type distance(_InputIterator __first, _InputIterator __last)
A generalization of pointer arithmetic.
constexpr auto cend(const _Container &__cont) noexcept(noexcept(std::end(__cont))) -> decltype(std::end(__cont))
Return an iterator pointing to one past the last element of the const container.
constexpr auto empty(const _Container &__cont) noexcept(noexcept(__cont.empty())) -> decltype(__cont.empty())
Return whether a container is empty.
constexpr auto size(const _Container &__cont) noexcept(noexcept(__cont.size())) -> decltype(__cont.size())
Return the size of a container.
constexpr auto cbegin(const _Container &__cont) noexcept(noexcept(std::begin(__cont))) -> decltype(std::begin(__cont))
Return an iterator pointing to the first element of the const container.
is_default_constructible
Definition type_traits:1204
Struct holding two objects of arbitrary type.
Definition stl_pair.h:304
_T1 first
The first member.
Definition stl_pair.h:308
_T2 second
The second member.
Definition stl_pair.h:309