#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <new> // For placement new
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>
#include <bits/move.h>

#if __cplusplus > 201703L && _GLIBCXX_HOSTED
#include <bits/atomic_wait.h>
#endif

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
#if __cplusplus > 201703L
  /// Enumeration for memory_order
  enum class memory_order : int
    {
      relaxed,
      consume,
      acquire,
      release,
      acq_rel,
      seq_cst
    };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#else
  /// Enumeration for memory_order
  typedef enum memory_order
    {
      memory_order_relaxed,
      memory_order_consume,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      memory_order_seq_cst
    } memory_order;
#endif
  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) | int(__mod));
  }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) & int(__mod));
  }
  // Drop release ordering as per [atomics.types.operations.req]/21.
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }

  constexpr bool
  __is_valid_cmpexch_failure_order(memory_order __m) noexcept
  {
    return (__m & __memory_order_mask) != memory_order_release
      && (__m & __memory_order_mask) != memory_order_acq_rel;
  }
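
  // Example (illustrative, not part of this header): a minimal sketch of how
  // the defaulted failure order is derived from the success order. A failed
  // compare-exchange performs no store, so acq_rel maps to acquire and
  // release maps to relaxed:
  //
  //   static_assert(__cmpexch_failure_order(memory_order_acq_rel)
  //                 == memory_order_acquire);
  //   static_assert(__cmpexch_failure_order(memory_order_release)
  //                 == memory_order_relaxed);
  //   static_assert(__cmpexch_failure_order(memory_order_seq_cst)
  //                 == memory_order_seq_cst);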
  template<typename _IntTp>
    struct __atomic_base;
  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }
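
  // Example (illustrative, not part of this header): a release fence can be
  // combined with relaxed atomic stores, pairing with an acquire load (or
  // acquire fence) in another thread. The surrounding variables here are
  // hypothetical:
  //
  //   data = 42;                                     // plain store
  //   std::atomic_thread_fence(std::memory_order_release);
  //   ready.store(true, std::memory_order_relaxed);  // ready: atomic<bool>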
  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }
#if __glibcxx_atomic_value_initialization
# define _GLIBCXX20_INIT(I) = I
#else
# define _GLIBCXX20_INIT(I)
#endif

#define ATOMIC_VAR_INIT(_VI) { _VI }
  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;
  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
    : __atomic_flag_base{ _S_init(__i) }
    { }
    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }
#ifdef __glibcxx_atomic_flag_test // C++ >= 20
    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const volatile noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }
#endif // __glibcxx_atomic_flag_test
#if __glibcxx_atomic_wait
    _GLIBCXX_ALWAYS_INLINE void
    wait(bool __old, memory_order __m = memory_order_seq_cst) const noexcept
    {
      const __atomic_flag_data_type __v
	= __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;

      std::__atomic_wait_address_v(&_M_i, __v,
	  [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
    }

    _GLIBCXX_ALWAYS_INLINE void
    notify_one() noexcept
    { std::__atomic_notify_address(&_M_i, false); }

    _GLIBCXX_ALWAYS_INLINE void
    notify_all() noexcept
    { std::__atomic_notify_address(&_M_i, true); }
#endif // __glibcxx_atomic_wait
    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b __attribute__ ((__unused__))
	= __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b __attribute__ ((__unused__))
	= __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }
  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
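
  // Example (illustrative, not part of this header): std::atomic_flag as a
  // minimal spin lock. test_and_set(acquire) loops until the flag was clear,
  // and clear(release) publishes the writes made in the critical section:
  //
  //   std::atomic_flag lock = ATOMIC_FLAG_INIT;
  //   void critical()
  //   {
  //     while (lock.test_and_set(std::memory_order_acquire))
  //       { /* spin */ }
  //     // ... critical section ...
  //     lock.clear(std::memory_order_release);
  //   }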
  namespace __atomic_impl
  {
    template<typename _Tp>
      using _Val = typename remove_volatile<_Tp>::type;

#if __glibcxx_atomic_min_max // C++ >= 26
    template<typename _Tp>
      _Tp
      __fetch_min(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept;

    template<typename _Tp>
      _Tp
      __fetch_max(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept;
#endif
  } // namespace __atomic_impl
  /// Base class for atomic integrals.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp	__int_type;

      static constexpr int _S_alignment =
	sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }
      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
	store(__i);
	return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
	store(__i);
	return __i;
      }
      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
      bool
      is_lock_free() const noexcept
      {
	// Use a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_i),
	    reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
	// Use a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_i),
	    reinterpret_cast<void *>(-_S_alignment));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
	    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_i, __i, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_i, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
	       memory_order __m = memory_order_seq_cst) noexcept
      {
	return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return __atomic_exchange_n(&_M_i, __i, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m1, memory_order __m2) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m1,
			    memory_order __m2) volatile noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m = memory_order_seq_cst) noexcept
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
		   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m1, memory_order __m2) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m1,
			      memory_order __m2) volatile noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m = memory_order_seq_cst) noexcept
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
		   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __cmpexch_failure_order(__m));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__int_type __old,
	   memory_order __m = memory_order_seq_cst) const noexcept
      {
	std::__atomic_wait_address_v(&_M_i, __old,
				     [__m, this] { return this->load(__m); });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() noexcept
      { std::__atomic_notify_address(&_M_i, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() noexcept
      { std::__atomic_notify_address(&_M_i, true); }
#endif // __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
	       memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
#if __glibcxx_atomic_min_max // C++ >= 26
      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_min(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_min(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_min(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_min(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_max(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_max(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_max(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_max(&_M_i, __i, __m); }
#endif // __glibcxx_atomic_min_max
    };
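
  // Example (illustrative, not part of this header): __atomic_base<int> is
  // the base of std::atomic<int>, so the members above surface as:
  //
  //   std::atomic<int> counter{0};
  //   counter.fetch_add(1, std::memory_order_relaxed);  // lock-free RMW
  //   int expected = 1;
  //   counter.compare_exchange_strong(expected, 2);     // seq_cst both ways
  //   int value = counter.load(std::memory_order_acquire);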
  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp*	__pointer_type;

      __pointer_type	_M_p _GLIBCXX20_INIT(nullptr);

      // Factored out to facilitate explicit specialization.
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d)
      { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }
      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
	store(__p);
	return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
	store(__p);
	return __p;
      }
      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(1),
				  int(memory_order_seq_cst)); }
      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
				  int(memory_order_seq_cst)); }
      bool
      is_lock_free() const noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_p),
	    reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_p),
	    reinterpret_cast<void *>(-__alignof(_M_p)));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_p, __p, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_p, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) noexcept
      {
	return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return __atomic_exchange_n(&_M_p, __p, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
			    memory_order __m1, memory_order __m2) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
			    memory_order __m1,
			    memory_order __m2) volatile noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m1, memory_order __m2) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m1,
			      memory_order __m2) volatile noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
					   int(__m1), int(__m2));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__pointer_type __old,
	   memory_order __m = memory_order_seq_cst) const noexcept
      {
	std::__atomic_wait_address_v(&_M_p, __old,
				     [__m, this]
				     { return this->load(__m); });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { std::__atomic_notify_address(&_M_p, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { std::__atomic_notify_address(&_M_p, true); }
#endif // __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _S_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _S_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _S_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _S_type_size(__d), int(__m)); }
#if __glibcxx_atomic_min_max // C++ >= 26
      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_min(__pointer_type __p,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_min(&_M_p, __p, __m); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_min(__pointer_type __p,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_min(&_M_p, __p, __m); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_max(__pointer_type __p,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_max(&_M_p, __p, __m); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_max(__pointer_type __p,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_max(&_M_p, __p, __m); }
#endif // __glibcxx_atomic_min_max
    };
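
  // Example (illustrative, not part of this header): for std::atomic<T*> the
  // difference argument is scaled by sizeof(T) via _S_type_size, matching
  // ordinary pointer arithmetic:
  //
  //   long buf[8];
  //   std::atomic<long*> p{buf};
  //   p.fetch_add(2);   // advances by 2 elements (16 bytes here),
  //   ++p;              // not by 2 bytes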
  namespace __atomic_impl
  {
    // Implementation details of atomic padding handling.

    template<typename _Tp>
      constexpr bool
      __maybe_has_padding()
      {
#if ! __has_builtin(__builtin_clear_padding)
	return false;
#elif __has_builtin(__has_unique_object_representations)
	return !__has_unique_object_representations(_Tp)
	  && !is_same<_Tp, float>::value && !is_same<_Tp, double>::value;
#else
	return true;
#endif
      }

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _GLIBCXX14_CONSTEXPR _Tp*
      __clear_padding(_Tp& __val) noexcept
      {
	auto* __ptr = std::__addressof(__val);
#if __has_builtin(__builtin_clear_padding)
	if constexpr (__atomic_impl::__maybe_has_padding<_Tp>())
	  __builtin_clear_padding(__ptr);
#endif
	return __ptr;
      }
    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      __compare_exchange(_Tp& __val, _Val<_Tp>& __e, _Val<_Tp>& __i,
			 bool __is_weak,
			 memory_order __s, memory_order __f) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__f));

	using _Vp = _Val<_Tp>;
	_Tp* const __pval = std::__addressof(__val);

	if constexpr (!__atomic_impl::__maybe_has_padding<_Vp>())
	  return __atomic_compare_exchange(__pval, std::__addressof(__e),
					   std::__addressof(__i), __is_weak,
					   int(__s), int(__f));
	else if constexpr (!_AtomicRef) // std::atomic<T>
	  {
	    // Clear padding of the value we want to set:
	    _Vp* const __pi = __atomic_impl::__clear_padding(__i);
	    // Only allowed to modify __e on failure, so make a copy:
	    _Vp __exp = __e;
	    // Clear padding of the expected value:
	    _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);

	    // For std::atomic<T> the stored value always has zeroed padding,
	    // so comparing object representations cannot fail spuriously.
	    if (__atomic_compare_exchange(__pval, __pexp, __pi,
					  __is_weak, int(__s), int(__f)))
	      return true;

	    // Value bits must differ; copy the observed value back to __e:
	    __builtin_memcpy(std::__addressof(__e), __pexp, sizeof(_Vp));
	    return false;
	  }
	else // std::atomic_ref<T> where T might have non-zero padding.
	  {
	    // Clear padding of the value we want to set:
	    _Vp* const __pi = __atomic_impl::__clear_padding(__i);
	    // Only allowed to modify __e on failure, so make a copy:
	    _Vp __exp = __e;
	    // Optimistically assume a previous store zeroed the padding:
	    _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);

	    // The referenced object might have non-zero padding bits, so a
	    // failed compare-exchange can mean only the padding differed.
	    // Loop until the result is unambiguous:
	    while (true)
	      {
		// Value bits expected by the caller for this attempt:
		_Vp __orig = __exp;

		if (__atomic_compare_exchange(__pval, __pexp, __pi,
					      __is_weak, int(__s), int(__f)))
		  return true;

		// __exp now holds the representation observed in memory:
		_Vp __curr = __exp;

		// Compare value representations with padding zeroed out:
		if (__builtin_memcmp(__atomic_impl::__clear_padding(__orig),
				     __atomic_impl::__clear_padding(__curr),
				     sizeof(_Vp)))
		  {
		    // Value bits differ: a genuine failure. Report the
		    // observed value back to the caller:
		    __builtin_memcpy(std::__addressof(__e), __pexp,
				     sizeof(_Vp));
		    return false;
		  }

		// Only the padding differed: retry, now expecting the
		// padding bits just observed in memory (left in __exp).
	      }
	  }
      }
#pragma GCC diagnostic pop
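
  // Example (illustrative, not part of this header): why padding matters for
  // compare_exchange. In a struct such as
  //
  //   struct S { char c; int i; };   // typically 3 padding bytes after 'c'
  //
  // the builtin compares whole object representations, so two values with
  // equal members but different padding bits would spuriously fail. The
  // __clear_padding calls above zero those bits (via __builtin_clear_padding)
  // so that only value bits decide success or failure.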
#if __cplusplus > 201703L
  // Implementation details of atomic_ref and atomic<floating-point>.
  namespace __atomic_impl
  {
    // Like _Val<T> above, but for difference_type arguments.
    template<typename _Tp>
      using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;

    template<size_t _Size, size_t _Align>
      _GLIBCXX_ALWAYS_INLINE bool
      is_lock_free() noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
      }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
      { __atomic_store(__ptr, __atomic_impl::__clear_padding(__t), int(__m)); }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      load(const _Tp* __ptr, memory_order __m) noexcept
      {
	alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
	auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
	__atomic_load(__ptr, __dest, int(__m));
	return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
      {
	alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
	auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
	__atomic_exchange(__ptr, __atomic_impl::__clear_padding(__desired),
			  __dest, int(__m));
	return *__dest;
      }
    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
			    _Val<_Tp> __desired, memory_order __success,
			    memory_order __failure) noexcept
      {
	return __atomic_impl::__compare_exchange<_AtomicRef>(
	  *__ptr, __expected, __desired, true, __success, __failure);
      }

    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
			      _Val<_Tp> __desired, memory_order __success,
			      memory_order __failure) noexcept
      {
	return __atomic_impl::__compare_exchange<_AtomicRef>(
	  *__ptr, __expected, __desired, false, __success, __failure);
      }
#if __glibcxx_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      wait(const _Tp* __ptr, _Val<_Tp> __old,
	   memory_order __m = memory_order_seq_cst) noexcept
      {
	std::__atomic_wait_address_v(__ptr, __old,
	    [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_one(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, false); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_all(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, true); }
#endif // __glibcxx_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
    template<typename _Tp>
      concept __atomic_fetch_addable
	= requires (_Tp __t) { __atomic_fetch_add(&__t, __t, 0); };

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
	if constexpr (__atomic_fetch_addable<_Tp>)
	  return __atomic_fetch_add(__ptr, __i, int(__m));
	else
	  {
	    _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	    _Val<_Tp> __newval = __oldval + __i;
	    while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
					  memory_order_relaxed))
	      __newval = __oldval + __i;
	    return __oldval;
	  }
      }
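
    // Example (illustrative, not part of this header): the CAS loop above is
    // the usual fallback for read-modify-write on types with no native
    // builtin. On failure, compare_exchange_weak reloads __oldval with the
    // value observed in memory, so only __newval needs recomputing:
    //
    //   std::atomic<float> f{1.0f};
    //   f.fetch_add(0.5f);   // may lower to such a CAS loop on many targets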
    template<typename _Tp>
      concept __atomic_fetch_subtractable
	= requires (_Tp __t) { __atomic_fetch_sub(&__t, __t, 0); };

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
	if constexpr (__atomic_fetch_subtractable<_Tp>)
	  return __atomic_fetch_sub(__ptr, __i, int(__m));
	else
	  {
	    _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	    _Val<_Tp> __newval = __oldval - __i;
	    while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
					  memory_order_relaxed))
	      __newval = __oldval - __i;
	    return __oldval;
	  }
      }
    template<typename _Tp>
      concept __atomic_add_fetchable
	= requires (_Tp __t) { __atomic_add_fetch(&__t, __t, 0); };

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
	if constexpr (__atomic_add_fetchable<_Tp>)
	  return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST);
	else
	  {
	    _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	    _Val<_Tp> __newval = __oldval + __i;
	    while (!compare_exchange_weak(__ptr, __oldval, __newval,
					  memory_order_seq_cst,
					  memory_order_relaxed))
	      __newval = __oldval + __i;
	    return __newval;
	  }
      }
    template<typename _Tp>
      concept __atomic_sub_fetchable
	= requires (_Tp __t) { __atomic_sub_fetch(&__t, __t, 0); };

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
	if constexpr (__atomic_sub_fetchable<_Tp>)
	  return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST);
	else
	  {
	    _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	    _Val<_Tp> __newval = __oldval - __i;
	    while (!compare_exchange_weak(__ptr, __oldval, __newval,
					  memory_order_seq_cst,
					  memory_order_relaxed))
	      __newval = __oldval - __i;
	    return __newval;
	  }
      }
#if __glibcxx_atomic_min_max // C++ >= 26
    template<typename _Tp>
      concept __atomic_fetch_minmaxable
	= requires (_Tp __t)
	{
	  __atomic_fetch_min(&__t, __t, 0);
	  __atomic_fetch_max(&__t, __t, 0);
	};

    template<typename _Tp>
      _Tp
      __fetch_min(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
	if constexpr (__atomic_fetch_minmaxable<_Tp>)
	  return __atomic_fetch_min(__ptr, __i, int(__m));
	else
	  {
	    _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	    _Val<_Tp> __newval = __oldval < __i ? __oldval : __i;
	    while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
					  memory_order_relaxed))
	      __newval = __oldval < __i ? __oldval : __i;
	    return __oldval;
	  }
      }

    template<typename _Tp>
      _Tp
      __fetch_max(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
	if constexpr (__atomic_fetch_minmaxable<_Tp>)
	  return __atomic_fetch_max(__ptr, __i, int(__m));
	else
	  {
	    _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	    _Val<_Tp> __newval = __oldval > __i ? __oldval : __i;
	    while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
					  memory_order_relaxed))
	      __newval = __oldval > __i ? __oldval : __i;
	    return __oldval;
	  }
      }
#endif // __glibcxx_atomic_min_max
  } // namespace __atomic_impl
  // base class for atomic<floating-point-type>
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      static constexpr size_t _S_alignment = __alignof__(_Fp);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_Fp), 0);

      __atomic_float() = default;

      constexpr
      __atomic_float(_Fp __t) : _M_fp(__t)
      {
	if (!std::__is_constant_evaluated())
	  __atomic_impl::__clear_padding(_M_fp);
      }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;
      _Fp
      operator=(_Fp __t) volatile noexcept
      {
	this->store(__t);
	return __t;
      }

      _Fp
      operator=(_Fp __t) noexcept
      {
	this->store(__t);
	return __t;
      }
      bool
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }
      _Fp
      exchange(_Fp __desired,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      _Fp
      exchange(_Fp __desired,
	       memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __success,
			    memory_order __failure) noexcept
      {
	return __atomic_impl::compare_exchange_weak(&_M_fp,
						    __expected, __desired,
						    __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __success,
			    memory_order __failure) volatile noexcept
      {
	return __atomic_impl::compare_exchange_weak(&_M_fp,
						    __expected, __desired,
						    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __success,
			      memory_order __failure) noexcept
      {
	return __atomic_impl::compare_exchange_strong(&_M_fp,
						      __expected, __desired,
						      __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __success,
			      memory_order __failure) volatile noexcept
      {
	return __atomic_impl::compare_exchange_strong(&_M_fp,
						      __expected, __desired,
						      __success, __failure);
      }
      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __order = memory_order_seq_cst)
      noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __order = memory_order_seq_cst)
      noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(&_M_fp, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(&_M_fp); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(&_M_fp); }
#endif // __glibcxx_atomic_wait
      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
#if __glibcxx_atomic_min_max // C++ >= 26
      value_type
      fetch_min(value_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_min(&_M_fp, __i, __m); }

      value_type
      fetch_min(value_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_min(&_M_fp, __i, __m); }

      value_type
      fetch_max(value_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_max(&_M_fp, __i, __m); }

      value_type
      fetch_max(value_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_max(&_M_fp, __i, __m); }
#endif // __glibcxx_atomic_min_max
      value_type
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

    private:
      alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
    };
#undef _GLIBCXX20_INIT
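
  // Example (illustrative, not part of this header): __atomic_float underlies
  // std::atomic<float/double/long double> (C++20), so the members above allow:
  //
  //   std::atomic<double> sum{0.0};
  //   sum += 2.5;                                    // __add_fetch_flt
  //   sum.fetch_sub(1.0, std::memory_order_acq_rel);
  //   double s = sum.load();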
  template<typename _Tp>
    struct __atomic_ref_base;

  // Base class for atomic_ref<const T>.
  template<typename _Tp>
    struct __atomic_ref_base<const _Tp>
    {
    protected:
      using _Vt = remove_cv_t<_Tp>;

      static consteval bool
      _S_is_always_lock_free()
      {
	if constexpr (is_pointer_v<_Vt>)
	  return ATOMIC_POINTER_LOCK_FREE == 2;
	else
	  return __atomic_always_lock_free(sizeof(_Vt), 0);
      }

      static consteval int
      _S_required_alignment()
      {
	if constexpr (is_floating_point_v<_Vt> || is_pointer_v<_Vt>)
	  return __alignof__(_Vt);
	else if constexpr ((sizeof(_Vt) & (sizeof(_Vt) - 1))
			     || sizeof(_Vt) > 16)
	  return alignof(_Vt);
	else
	  // Power-of-two sizes up to 16 bytes: require natural alignment so
	  // the object can be accessed with a single atomic instruction.
	  return (sizeof(_Vt) > alignof(_Vt)) ? sizeof(_Vt) : alignof(_Vt);
      }
    public:
      using value_type = _Vt;

      static_assert(is_trivially_copyable_v<value_type>);

      static constexpr bool is_always_lock_free = _S_is_always_lock_free();

      static_assert(is_always_lock_free || !is_volatile_v<_Tp>,
		    "atomic operations on volatile T must be lock-free");

      static constexpr size_t required_alignment = _S_required_alignment();

      __atomic_ref_base() = delete;
      __atomic_ref_base& operator=(const __atomic_ref_base&) = delete;

      __atomic_ref_base(const _Tp* __ptr) noexcept
      : _M_ptr(const_cast<_Tp*>(__ptr))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref_base(const __atomic_ref_base&) noexcept = default;
      operator value_type() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }

      value_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(value_type __old,
	   memory_order __m = memory_order_seq_cst) const noexcept
      {
	static_assert(!is_volatile_v<_Tp>,
		      "atomic waits on volatile are not supported");
	__atomic_impl::wait(_M_ptr, __old, __m);
      }
#endif // __glibcxx_atomic_wait

#if __glibcxx_atomic_ref >= 202411L // C++ >= 26
      _GLIBCXX_ALWAYS_INLINE constexpr const _Tp*
      address() const noexcept
      { return _M_ptr; }
#endif

    protected:
      _Tp* _M_ptr;
    };
  // Base class for atomic_ref<T> (non-const T).
  template<typename _Tp>
    struct __atomic_ref_base
    : __atomic_ref_base<const _Tp>
    {
      using value_type = typename __atomic_ref_base<const _Tp>::value_type;

      __atomic_ref_base(_Tp* __ptr) noexcept
      : __atomic_ref_base<const _Tp>(__ptr)
      { }

      value_type
      operator=(value_type __t) const noexcept
      {
	this->store(__t);
	return __t;
      }

      void
      store(value_type __t, memory_order __m = memory_order_seq_cst)
      const noexcept
      { __atomic_impl::store(this->_M_ptr, __t, __m); }

      value_type
      exchange(value_type __desired, memory_order __m = memory_order_seq_cst)
      const noexcept
      { return __atomic_impl::exchange(this->_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
			    memory_order __success,
			    memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_weak<true>(
	  this->_M_ptr, __expected, __desired, __success, __failure);
      }
      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
			      memory_order __success,
			      memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_strong<true>(
	  this->_M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
			    memory_order __order = memory_order_seq_cst)
      const noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
			      memory_order __order = memory_order_seq_cst)
      const noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      {
	static_assert(!is_volatile_v<_Tp>,
		      "atomic waits on volatile are not supported");
	__atomic_impl::notify_one(this->_M_ptr);
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      {
	static_assert(!is_volatile_v<_Tp>,
		      "atomic waits on volatile are not supported");
	__atomic_impl::notify_all(this->_M_ptr);
      }
#endif // __glibcxx_atomic_wait

#if __glibcxx_atomic_ref >= 202411L // C++ >= 26
      _GLIBCXX_ALWAYS_INLINE constexpr _Tp*
      address() const noexcept
      { return this->_M_ptr; }
#endif
    };
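
  // Example (illustrative, not part of this header): these base classes
  // implement std::atomic_ref, which applies atomic operations to an
  // ordinary object whose alignment satisfies required_alignment:
  //
  //   alignas(std::atomic_ref<int>::required_alignment) int x = 0;
  //   std::atomic_ref<int> rx(x);
  //   rx.store(1);
  //   rx.notify_all();   // wakes threads blocked in rx.wait(0)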
  template<typename _Tp,
	   bool = is_integral_v<_Tp> && !is_same_v<remove_cv_t<_Tp>, bool>,
	   bool = is_floating_point_v<_Tp>,
	   bool = is_pointer_v<_Tp>>
    struct __atomic_ref;

  // Generic type.
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false, false>
    : __atomic_ref_base<_Tp>
    {
      using __atomic_ref_base<_Tp>::__atomic_ref_base;
      using __atomic_ref_base<_Tp>::operator=;
    };

  // Generic const type.
  template<typename _Tp>
    struct __atomic_ref<const _Tp, false, false, false>
    : __atomic_ref_base<const _Tp>
    {
      using __atomic_ref_base<const _Tp>::__atomic_ref_base;
    };
  // Integral type.
  template<typename _Tp>
    struct __atomic_ref<_Tp, true, false, false>
    : __atomic_ref_base<_Tp>
    {
      using value_type = typename __atomic_ref_base<_Tp>::value_type;
      using difference_type = value_type;

      using __atomic_ref_base<_Tp>::__atomic_ref_base;
      using __atomic_ref_base<_Tp>::operator=;

      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(this->_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(this->_M_ptr, __i, __m); }

      value_type
      fetch_and(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_and(this->_M_ptr, __i, __m); }

      value_type
      fetch_or(value_type __i,
	       memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_or(this->_M_ptr, __i, __m); }

      value_type
      fetch_xor(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_xor(this->_M_ptr, __i, __m); }
#if __glibcxx_atomic_min_max // C++ >= 26
      value_type
      fetch_min(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_min(this->_M_ptr, __i, __m); }

      value_type
      fetch_max(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_max(this->_M_ptr, __i, __m); }
#endif // __glibcxx_atomic_min_max
      _GLIBCXX_ALWAYS_INLINE value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(this->_M_ptr, value_type(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(this->_M_ptr, value_type(1)); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch(this->_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch(this->_M_ptr, __i); }

      value_type
      operator&=(value_type __i) const noexcept
      { return __atomic_impl::__and_fetch(this->_M_ptr, __i); }

      value_type
      operator|=(value_type __i) const noexcept
      { return __atomic_impl::__or_fetch(this->_M_ptr, __i); }

      value_type
      operator^=(value_type __i) const noexcept
      { return __atomic_impl::__xor_fetch(this->_M_ptr, __i); }
    };
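
  // Example (illustrative, not part of this header): the integral
  // specialization adds the arithmetic interface:
  //
  //   int n = 0;
  //   std::atomic_ref<int> rn(n);
  //   rn.fetch_or(0x4, std::memory_order_relaxed);
  //   rn += 10;   // __add_fetch, seq_cst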
  // Const integral type.
  template<typename _Tp>
    struct __atomic_ref<const _Tp, true, false, false>
    : __atomic_ref_base<const _Tp>
    {
      using difference_type
	= typename __atomic_ref_base<const _Tp>::value_type;
      using __atomic_ref_base<const _Tp>::__atomic_ref_base;
    };
  // Floating-point type.
  template<typename _Fp>
    struct __atomic_ref<_Fp, false, true, false>
    : __atomic_ref_base<_Fp>
    {
      using value_type = typename __atomic_ref_base<_Fp>::value_type;
      using difference_type = value_type;

      using __atomic_ref_base<_Fp>::__atomic_ref_base;
      using __atomic_ref_base<_Fp>::operator=;

      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_add_flt(this->_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_sub_flt(this->_M_ptr, __i, __m); }
#if __glibcxx_atomic_min_max // C++ >= 26
      value_type
      fetch_min(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_min(this->_M_ptr, __i, __m); }

      value_type
      fetch_max(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_max(this->_M_ptr, __i, __m); }
#endif // __glibcxx_atomic_min_max
      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch_flt(this->_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch_flt(this->_M_ptr, __i); }
    };

  // Const floating-point type.
  template<typename _Fp>
    struct __atomic_ref<const _Fp, false, true, false>
    : __atomic_ref_base<const _Fp>
    {
      using difference_type
	= typename __atomic_ref_base<const _Fp>::value_type;
      using __atomic_ref_base<const _Fp>::__atomic_ref_base;
    };
  // Pointer type.
  template<typename _Pt>
    struct __atomic_ref<_Pt, false, false, true>
    : __atomic_ref_base<_Pt>
    {
      using value_type = typename __atomic_ref_base<_Pt>::value_type;
      using difference_type = ptrdiff_t;

      using __atomic_ref_base<_Pt>::__atomic_ref_base;
      using __atomic_ref_base<_Pt>::operator=;

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_add(difference_type __d,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(this->_M_ptr, _S_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_sub(difference_type __d,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(this->_M_ptr, _S_type_size(__d), __m); }
#if __glibcxx_atomic_min_max // C++ >= 26
      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_min(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_min(this->_M_ptr, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_max(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_max(this->_M_ptr, __i, __m); }
#endif // __glibcxx_atomic_min_max
      value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      {
	return __atomic_impl::__add_fetch(this->_M_ptr, _S_type_size(1));
      }

      value_type
      operator--() const noexcept
      {
	return __atomic_impl::__sub_fetch(this->_M_ptr, _S_type_size(1));
      }

      value_type
      operator+=(difference_type __d) const noexcept
      {
	return __atomic_impl::__add_fetch(this->_M_ptr, _S_type_size(__d));
      }

      value_type
      operator-=(difference_type __d) const noexcept
      {
	return __atomic_impl::__sub_fetch(this->_M_ptr, _S_type_size(__d));
      }

    private:
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) noexcept
      {
	using _Et = remove_pointer_t<value_type>;
	static_assert(is_object_v<_Et>);
	return __d * sizeof(_Et);
      }
    };
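
  // Example (illustrative, not part of this header): the pointer
  // specialization scales by the element size, as _S_type_size shows:
  //
  //   int arr[4] = {0, 1, 2, 3};
  //   int* p = arr;
  //   std::atomic_ref<int*> rp(p);
  //   rp.fetch_add(2);   // p now points at arr[2]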
  // Const pointer type.
  template<typename _Pt>
    struct __atomic_ref<const _Pt, false, false, true>
    : __atomic_ref_base<const _Pt>
    {
      using difference_type = ptrdiff_t;
      using __atomic_ref_base<const _Pt>::__atomic_ref_base;
    };
#endif // C++20

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _GLIBCXX_ATOMIC_BASE_H