#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <new> // For placement new
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>
#include <bits/move.h>

#if __cplusplus > 201703L && _GLIBCXX_HOSTED
# include <bits/atomic_wait.h>
#endif

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
#if __cplusplus > 201703L
  /// Enumeration for memory_order
  enum class memory_order : int
    {
      relaxed,
      consume,
      acquire,
      release,
      acq_rel,
      seq_cst
    };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#else
  /// Enumeration for memory_order
  typedef enum memory_order
    {
      memory_order_relaxed,
      memory_order_consume,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      memory_order_seq_cst
    } memory_order;
#endif
  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod) noexcept
  { return memory_order(int(__m) | int(__mod)); }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod) noexcept
  { return memory_order(int(__m) & int(__mod)); }

  // Drop release ordering as per [atomics.types.operations.req]/21.
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }

  constexpr bool
  __is_valid_cmpexch_failure_order(memory_order __m) noexcept
  {
    return (__m & __memory_order_mask) != memory_order_release
      && (__m & __memory_order_mask) != memory_order_acq_rel;
  }
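  // Illustrative sketch (not part of the upstream header): the failure
  // ordering derived above simply drops any release component, e.g.
  //
  //   static_assert(__cmpexch_failure_order(memory_order_acq_rel)
  //                 == memory_order_acquire);
  //   static_assert(__cmpexch_failure_order(memory_order_release)
  //                 == memory_order_relaxed);
  //   static_assert(__cmpexch_failure_order(memory_order_seq_cst)
  //                 == memory_order_seq_cst);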
  // Base class template for atomic integrals (defined below).
  template<typename _IntTp>
    struct __atomic_base;

  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }

  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }

#if __glibcxx_atomic_value_initialization
# define _GLIBCXX20_INIT(I) = I
#else
# define _GLIBCXX20_INIT(I)
#endif

#define ATOMIC_VAR_INIT(_VI) { _VI }
  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif

  // Base type for atomic_flag; POD so that atomic_flag can derive from it
  // and remain compatible with a C interface.
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

#ifdef __glibcxx_atomic_flag_test
    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const volatile noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }
#endif // __glibcxx_atomic_flag_test

#if __glibcxx_atomic_wait
    _GLIBCXX_ALWAYS_INLINE void
    wait(bool __old, memory_order __m = memory_order_seq_cst) const noexcept
    {
      const __atomic_flag_data_type __v
	= __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;

      std::__atomic_wait_address_v(&_M_i, __v,
	  [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
    }

    _GLIBCXX_ALWAYS_INLINE void
    notify_one() noexcept
    { std::__atomic_notify_address(&_M_i, false); }

    _GLIBCXX_ALWAYS_INLINE void
    notify_all() noexcept
    { std::__atomic_notify_address(&_M_i, true); }
#endif // __glibcxx_atomic_wait

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b __attribute__ ((__unused__))
	= __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b __attribute__ ((__unused__))
	= __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
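  // Example (illustrative sketch, not part of the upstream header; the
  // names below are hypothetical): a minimal spin lock built on
  // std::atomic_flag, acquiring on lock and releasing on unlock.
  //
  //   #include <atomic>
  //
  //   class __spin_lock_example
  //   {
  //     std::atomic_flag _M_flag = ATOMIC_FLAG_INIT;
  //   public:
  //     void lock()
  //     { while (_M_flag.test_and_set(std::memory_order_acquire)) { } }
  //     void unlock()
  //     { _M_flag.clear(std::memory_order_release); }
  //   };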
  namespace __atomic_impl
  {
    // Remove volatile and create a non-deduced context for value arguments.
    template<typename _Tp>
      using _Val = typename remove_volatile<_Tp>::type;

#if __glibcxx_atomic_min_max
    template<typename _Tp>
      _Val<_Tp>
      __fetch_min(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept;

    template<typename _Tp>
      _Val<_Tp>
      __fetch_max(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept;
#endif
  } // namespace __atomic_impl
  /// Base class for atomic integrals.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
	sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
	store(__i);
	return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
	store(__i);
	return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
	// Use a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_i),
	    reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
	// Use a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_i),
	    reinterpret_cast<void *>(-_S_alignment));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
	    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
	       memory_order __m = memory_order_seq_cst) noexcept
      {
	return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m1, memory_order __m2) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m1,
			    memory_order __m2) volatile noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m = memory_order_seq_cst) noexcept
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
		    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m1, memory_order __m2) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m1,
			      memory_order __m2) volatile noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m = memory_order_seq_cst) noexcept
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
		    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __cmpexch_failure_order(__m));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__int_type __old,
	   memory_order __m = memory_order_seq_cst) const noexcept
      {
	std::__atomic_wait_address_v(&_M_i, __old,
				     [__m, this] { return this->load(__m); });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() noexcept
      { std::__atomic_notify_address(&_M_i, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() noexcept
      { std::__atomic_notify_address(&_M_i, true); }
#endif // __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
	       memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

#if __glibcxx_atomic_min_max
      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_min(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_min(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_min(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_min(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_max(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_max(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_max(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_max(&_M_i, __i, __m); }
#endif // __glibcxx_atomic_min_max
    };
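  // Example (illustrative sketch, not part of the upstream header; the
  // function name is hypothetical): in this implementation the integral
  // specializations of std::atomic in <atomic> derive from __atomic_base,
  // so a typical read-modify-write loop maps onto the members above.
  //
  //   #include <atomic>
  //
  //   inline int
  //   __saturating_increment_example(std::atomic<int>& __a, int __max)
  //   {
  //     int __old = __a.load(std::memory_order_relaxed);
  //     while (__old < __max
  //            && !__a.compare_exchange_weak(__old, __old + 1,
  //                                          std::memory_order_acq_rel,
  //                                          std::memory_order_relaxed))
  //       { } // __old is reloaded by compare_exchange_weak on failure
  //     return __old;
  //   }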
  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p _GLIBCXX20_INIT(nullptr);

      // Factored out to facilitate explicit specialization.
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d)
      { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
	store(__p);
	return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
	store(__p);
	return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
				  int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_p),
	    reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_p),
	    reinterpret_cast<void *>(-__alignof(_M_p)));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) noexcept
      {
	return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
			    memory_order __m1,
			    memory_order __m2) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
			    memory_order __m1,
			    memory_order __m2) volatile noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m1,
			      memory_order __m2) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m1,
			      memory_order __m2) volatile noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
					   int(__m1), int(__m2));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__pointer_type __old,
	   memory_order __m = memory_order_seq_cst) const noexcept
      {
	std::__atomic_wait_address_v(&_M_p, __old,
				     [__m, this]
				     { return this->load(__m); });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { std::__atomic_notify_address(&_M_p, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { std::__atomic_notify_address(&_M_p, true); }
#endif // __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _S_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _S_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _S_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _S_type_size(__d), int(__m)); }
    };
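  // Example (illustrative sketch, not part of the upstream header; the
  // function name is hypothetical): the pointer specialization scales
  // fetch_add/fetch_sub by sizeof(_PTp) via _S_type_size, so atomic
  // arithmetic behaves like ordinary pointer arithmetic.
  //
  //   #include <atomic>
  //
  //   inline double*
  //   __claim_slot_example(std::atomic<double*>& __tail)
  //   {
  //     // Returns the old pointer; the stored pointer advances by one
  //     // element, i.e. sizeof(double) bytes.
  //     return __tail.fetch_add(1, std::memory_order_acq_rel);
  //   }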
  namespace __atomic_impl
  {
    // Implementation details of atomic padding handling.

    template<typename _Tp>
      constexpr bool
      __maybe_has_padding()
      {
#if ! __has_builtin(__builtin_clear_padding)
	return false;
#elif __has_builtin(__has_unique_object_representations)
	return !__has_unique_object_representations(_Tp)
	  && !is_same<_Tp, float>::value && !is_same<_Tp, double>::value;
#else
	return true;
#endif
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _GLIBCXX14_CONSTEXPR _Tp*
      __clear_padding(_Tp& __val) noexcept
      {
	auto* __ptr = std::__addressof(__val);
#if __has_builtin(__builtin_clear_padding)
	if _GLIBCXX17_CONSTEXPR (__atomic_impl::__maybe_has_padding<_Tp>())
	  __builtin_clear_padding(__ptr);
#endif
	return __ptr;
      }

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++17-extensions"

    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      __compare_exchange(_Tp& __val, _Val<_Tp>& __e, _Val<_Tp>& __i,
			 bool __is_weak,
			 memory_order __s, memory_order __f) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__f));

	using _Vp = _Val<_Tp>;
	_Tp* const __pval = std::__addressof(__val);

	if constexpr (!__atomic_impl::__maybe_has_padding<_Vp>())
	  {
	    return __atomic_compare_exchange(__pval, std::__addressof(__e),
					     std::__addressof(__i), __is_weak,
					     int(__s), int(__f));
	  }
	else if constexpr (!_AtomicRef) // std::atomic<T>
	  {
	    // The value written to memory has its padding cleared.
	    _Vp* const __pi = __atomic_impl::__clear_padding(__i);

	    // Only allowed to modify __e on failure, so use a copy with
	    // cleared padding for the comparison.
	    _Vp __exp = __e;
	    _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);

	    // For std::atomic<T> the stored value already has zeroed padding.
	    if (__atomic_compare_exchange(__pval, __pexp, __pi,
					  __is_weak, int(__s), int(__f)))
	      return true;

	    // Value bits must differ; copy the observed value back to __e.
	    __builtin_memcpy(std::__addressof(__e), __pexp, sizeof(_Vp));
	    return false;
	  }
	else // std::atomic_ref<T> where T has padding bits.
	  {
	    // The value written to memory has its padding cleared.
	    _Vp* const __pi = __atomic_impl::__clear_padding(__i);

	    // Only allowed to modify __e on failure, so use a copy with
	    // cleared padding for the comparison.
	    _Vp __exp = __e;
	    _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);

	    // The referenced object may contain arbitrary padding, so a
	    // failed exchange might differ only in the padding bits.  Retry
	    // until the value bits themselves compare unequal or the
	    // exchange succeeds.
	    while (true)
	      {
		_Vp __orig = __exp;
		if (__atomic_compare_exchange(__pval, __pexp, __pi,
					      __is_weak, int(__s), int(__f)))
		  return true;

		_Vp __curr = __exp;
		if (__builtin_memcmp(__atomic_impl::__clear_padding(__orig),
				     __atomic_impl::__clear_padding(__curr),
				     sizeof(_Vp)))
		  {
		    // Value representations differ: a real failure.
		    __builtin_memcpy(std::__addressof(__e), __pexp,
				     sizeof(_Vp));
		    return false;
		  }
	      }
	  }
      }
#pragma GCC diagnostic pop
  } // namespace __atomic_impl
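  // Example (illustrative sketch, not part of the upstream header; the
  // type and function names are hypothetical): __compare_exchange only
  // compares value bits, so a CAS on a type with internal padding does
  // not fail spuriously because of garbage padding bytes.
  //
  //   #include <atomic>
  //   #include <cstdint>
  //
  //   struct __padded_example { std::uint8_t __a; std::uint32_t __b; };
  //   // Three padding bytes follow __a in the object representation.
  //
  //   inline bool
  //   __cas_example(std::atomic<__padded_example>& __v,
  //                 __padded_example& __expected, __padded_example __desired)
  //   { return __v.compare_exchange_strong(__expected, __desired); }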
#if __cplusplus > 201703L
  // Implementation details of atomic_ref and atomic<floating-point>.
  namespace __atomic_impl
  {
    // As above, but for difference_type arguments.
    template<typename _Tp>
      using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;

    template<size_t _Size, size_t _Align>
      _GLIBCXX_ALWAYS_INLINE bool
      is_lock_free() noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
      { __atomic_store(__ptr, __atomic_impl::__clear_padding(__t), int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      load(const _Tp* __ptr, memory_order __m) noexcept
      {
	alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
	auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
	__atomic_load(__ptr, __dest, int(__m));
	return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
      {
	alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
	auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
	__atomic_exchange(__ptr, __atomic_impl::__clear_padding(__desired),
			  __dest, int(__m));
	return *__dest;
      }

    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
			    _Val<_Tp> __desired, memory_order __success,
			    memory_order __failure) noexcept
      {
	return __atomic_impl::__compare_exchange<_AtomicRef>(
		 *__ptr, __expected, __desired, true, __success, __failure);
      }

    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
			      _Val<_Tp> __desired, memory_order __success,
			      memory_order __failure) noexcept
      {
	return __atomic_impl::__compare_exchange<_AtomicRef>(
		 *__ptr, __expected, __desired, false, __success, __failure);
      }
#if __glibcxx_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      wait(const _Tp* __ptr, _Val<_Tp> __old,
	   memory_order __m = memory_order_seq_cst) noexcept
      {
	std::__atomic_wait_address_v(__ptr, __old,
	    [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_one(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, false); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_all(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, true); }
#endif // __glibcxx_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
    template<typename _Tp>
      concept __atomic_fetch_addable
	= requires (_Tp __t) { __atomic_fetch_add(&__t, __t, 0); };

    template<typename _Tp>
      _Val<_Tp>
      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
	if constexpr (__atomic_fetch_addable<_Tp>)
	  return __atomic_fetch_add(__ptr, __i, int(__m));
	else
	  {
	    _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	    _Val<_Tp> __newval = __oldval + __i;
	    while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
					  memory_order_relaxed))
	      __newval = __oldval + __i;
	    return __oldval;
	  }
      }

    template<typename _Tp>
      concept __atomic_fetch_subtractable
	= requires (_Tp __t) { __atomic_fetch_sub(&__t, __t, 0); };

    template<typename _Tp>
      _Val<_Tp>
      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
	if constexpr (__atomic_fetch_subtractable<_Tp>)
	  return __atomic_fetch_sub(__ptr, __i, int(__m));
	else
	  {
	    _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	    _Val<_Tp> __newval = __oldval - __i;
	    while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
					  memory_order_relaxed))
	      __newval = __oldval - __i;
	    return __oldval;
	  }
      }

    template<typename _Tp>
      concept __atomic_add_fetchable
	= requires (_Tp __t) { __atomic_add_fetch(&__t, __t, 0); };

    template<typename _Tp>
      _Val<_Tp>
      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
	if constexpr (__atomic_add_fetchable<_Tp>)
	  return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST);
	else
	  {
	    _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	    _Val<_Tp> __newval = __oldval + __i;
	    while (!compare_exchange_weak(__ptr, __oldval, __newval,
					  memory_order_seq_cst,
					  memory_order_relaxed))
	      __newval = __oldval + __i;
	    return __newval;
	  }
      }

    template<typename _Tp>
      concept __atomic_sub_fetchable
	= requires (_Tp __t) { __atomic_sub_fetch(&__t, __t, 0); };

    template<typename _Tp>
      _Val<_Tp>
      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
	if constexpr (__atomic_sub_fetchable<_Tp>)
	  return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST);
	else
	  {
	    _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	    _Val<_Tp> __newval = __oldval - __i;
	    while (!compare_exchange_weak(__ptr, __oldval, __newval,
					  memory_order_seq_cst,
					  memory_order_relaxed))
	      __newval = __oldval - __i;
	    return __newval;
	  }
      }
#if __glibcxx_atomic_min_max
    template<typename _Tp>
      concept __atomic_fetch_minmaxable
	= requires (_Tp __t) {
	  __atomic_fetch_min(&__t, __t, 0);
	  __atomic_fetch_max(&__t, __t, 0);
	};

    template<typename _Tp>
      _Val<_Tp>
      __fetch_min(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
	if constexpr (__atomic_fetch_minmaxable<_Tp>)
	  return __atomic_fetch_min(__ptr, __i, int(__m));
	else
	  {
	    _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	    _Val<_Tp> __newval = __oldval < __i ? __oldval : __i;
	    while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
					  memory_order_relaxed))
	      __newval = __oldval < __i ? __oldval : __i;
	    return __oldval;
	  }
      }

    template<typename _Tp>
      _Val<_Tp>
      __fetch_max(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
	if constexpr (__atomic_fetch_minmaxable<_Tp>)
	  return __atomic_fetch_max(__ptr, __i, int(__m));
	else
	  {
	    _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	    _Val<_Tp> __newval = __oldval > __i ? __oldval : __i;
	    while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
					  memory_order_relaxed))
	      __newval = __oldval > __i ? __oldval : __i;
	    return __oldval;
	  }
      }
#endif // __glibcxx_atomic_min_max
  } // namespace __atomic_impl
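  // Example (illustrative sketch, not part of the upstream header; the
  // function name is hypothetical): the compare-exchange fallback used by
  // __fetch_max above is equivalent to the familiar "update only if
  // greater" loop.
  //
  //   #include <atomic>
  //
  //   inline int
  //   __record_max_example(std::atomic<int>& __hi, int __v)
  //   {
  //     int __old = __hi.load(std::memory_order_relaxed);
  //     while (__old < __v
  //            && !__hi.compare_exchange_weak(__old, __v,
  //                                           std::memory_order_relaxed))
  //       { }
  //     return __old; // previous maximum
  //   }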
  // Base class for atomic<floating-point-type>.
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      static constexpr size_t _S_alignment = __alignof__(_Fp);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_Fp), 0);

      __atomic_float() = default;

      constexpr
      __atomic_float(_Fp __t) : _M_fp(__t)
      { __atomic_impl::__clear_padding(_M_fp); }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;

      _Fp
      operator=(_Fp __t) volatile noexcept
      {
	this->store(__t);
	return __t;
      }

      _Fp
      operator=(_Fp __t) noexcept
      {
	this->store(__t);
	return __t;
      }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }
      _Fp
      exchange(_Fp __desired,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      _Fp
      exchange(_Fp __desired,
	       memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __success,
			    memory_order __failure) noexcept
      {
	return __atomic_impl::compare_exchange_weak(&_M_fp,
						    __expected, __desired,
						    __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __success,
			    memory_order __failure) volatile noexcept
      {
	return __atomic_impl::compare_exchange_weak(&_M_fp,
						    __expected, __desired,
						    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __success,
			      memory_order __failure) noexcept
      {
	return __atomic_impl::compare_exchange_strong(&_M_fp,
						      __expected, __desired,
						      __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __success,
			      memory_order __failure) volatile noexcept
      {
	return __atomic_impl::compare_exchange_strong(&_M_fp,
						      __expected, __desired,
						      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __order = memory_order_seq_cst)
      noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __order = memory_order_seq_cst)
      noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(&_M_fp, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(&_M_fp); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(&_M_fp); }
#endif // __glibcxx_atomic_wait
      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

#if __glibcxx_atomic_min_max
      value_type
      fetch_min(value_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_min(&_M_fp, __i, __m); }

      value_type
      fetch_min(value_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_min(&_M_fp, __i, __m); }

      value_type
      fetch_max(value_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_max(&_M_fp, __i, __m); }

      value_type
      fetch_max(value_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_max(&_M_fp, __i, __m); }
#endif // __glibcxx_atomic_min_max

      value_type
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

    private:
      alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
    };
#undef _GLIBCXX20_INIT
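  // Example (illustrative sketch, not part of the upstream header; the
  // function name is hypothetical): std::atomic<double> builds on
  // __atomic_float, so fetch_add uses a native atomic add when the target
  // provides one and otherwise falls back to the CAS loop in
  // __atomic_impl::__fetch_add_flt.
  //
  //   #include <atomic>
  //
  //   inline double
  //   __accumulate_example(std::atomic<double>& __sum, double __x)
  //   { return __sum.fetch_add(__x, std::memory_order_relaxed); }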
  // Base class for atomic_ref<T>.
  template<typename _Tp>
    struct __atomic_ref_base;

  // Partial specialization providing the read-only operations.
  template<typename _Tp>
    struct __atomic_ref_base<const _Tp>
    {
    protected:
      using _Vt = remove_cv_t<_Tp>;

      static consteval bool
      _S_is_always_lock_free()
      {
	if constexpr (is_pointer_v<_Vt>)
	  return ATOMIC_POINTER_LOCK_FREE == 2;
	else
	  return __atomic_always_lock_free(sizeof(_Vt), 0);
      }

      static consteval int
      _S_required_alignment()
      {
	if constexpr (is_floating_point_v<_Vt> || is_pointer_v<_Vt>)
	  return __alignof__(_Vt);
	else if constexpr ((sizeof(_Vt) & (sizeof(_Vt) - 1))
			     || sizeof(_Vt) > 16)
	  return alignof(_Vt);
	else
	  // Require natural alignment for small power-of-two sizes so that
	  // the referenced object can be used with the atomic built-ins.
	  return (sizeof(_Vt) > alignof(_Vt)) ? sizeof(_Vt) : alignof(_Vt);
      }

    public:
      using value_type = _Vt;

      static_assert(is_trivially_copyable_v<value_type>);

      static constexpr bool is_always_lock_free = _S_is_always_lock_free();
      static_assert(is_always_lock_free || !is_volatile_v<_Tp>,
		    "atomic operations on volatile T must be lock-free");

      static constexpr size_t required_alignment = _S_required_alignment();

      __atomic_ref_base() = delete;
      __atomic_ref_base& operator=(const __atomic_ref_base&) = delete;

      __atomic_ref_base(const _Tp* __ptr) noexcept
      : _M_ptr(const_cast<_Tp*>(__ptr))
      { }

      __atomic_ref_base(const __atomic_ref_base&) noexcept = default;

      operator value_type() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }

      value_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(value_type __old,
	   memory_order __m = memory_order_seq_cst) const noexcept
      {
	static_assert(!is_volatile_v<_Tp>,
		      "atomic waits on volatile are not supported");
	__atomic_impl::wait(_M_ptr, __old, __m);
      }
#endif // __glibcxx_atomic_wait

#if __glibcxx_atomic_ref >= 202411L
      _GLIBCXX_ALWAYS_INLINE constexpr const _Tp*
      address() const noexcept
      { return _M_ptr; }
#endif

    protected:
      _Tp* _M_ptr;
    };
  template<typename _Tp>
    struct __atomic_ref_base
    : __atomic_ref_base<const _Tp>
    {
      using value_type = typename __atomic_ref_base<const _Tp>::value_type;

      __atomic_ref_base(_Tp* __ptr) noexcept
      : __atomic_ref_base<const _Tp>(__ptr)
      { }

      value_type
      operator=(value_type __t) const noexcept
      {
	this->store(__t);
	return __t;
      }

      void
      store(value_type __t, memory_order __m = memory_order_seq_cst)
	const noexcept
      { __atomic_impl::store(this->_M_ptr, __t, __m); }

      value_type
      exchange(value_type __desired,
	       memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(this->_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
			    memory_order __success,
			    memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_weak<true>(
		 this->_M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
			      memory_order __success,
			      memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_strong<true>(
		 this->_M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
			    memory_order __order = memory_order_seq_cst)
	const noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
			      memory_order __order = memory_order_seq_cst)
	const noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      {
	static_assert(!is_volatile_v<_Tp>,
		      "atomic waits on volatile are not supported");
	__atomic_impl::notify_one(this->_M_ptr);
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      {
	static_assert(!is_volatile_v<_Tp>,
		      "atomic waits on volatile are not supported");
	__atomic_impl::notify_all(this->_M_ptr);
      }
#endif // __glibcxx_atomic_wait

#if __glibcxx_atomic_ref >= 202411L
      _GLIBCXX_ALWAYS_INLINE constexpr _Tp*
      address() const noexcept
      { return this->_M_ptr; }
#endif
    };
  template<typename _Tp,
	   bool = is_integral_v<_Tp> && !is_same_v<remove_cv_t<_Tp>, bool>,
	   bool = is_floating_point_v<_Tp>,
	   bool = is_pointer_v<_Tp>>
    struct __atomic_ref;
  // Base class for atomic_ref<T> with non-integral, non-floating-point,
  // non-pointer T.
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false, false>
    : __atomic_ref_base<_Tp>
    {
      using __atomic_ref_base<_Tp>::__atomic_ref_base;
      using __atomic_ref_base<_Tp>::operator=;
    };

  template<typename _Tp>
    struct __atomic_ref<const _Tp, false, false, false>
    : __atomic_ref_base<const _Tp>
    {
      using __atomic_ref_base<const _Tp>::__atomic_ref_base;
    };
  // Base class for atomic_ref<integral-type>.
  template<typename _Tp>
    struct __atomic_ref<_Tp, true, false, false>
    : __atomic_ref_base<_Tp>
    {
      using value_type = typename __atomic_ref_base<_Tp>::value_type;
      using difference_type = value_type;

      using __atomic_ref_base<_Tp>::__atomic_ref_base;
      using __atomic_ref_base<_Tp>::operator=;

      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(this->_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(this->_M_ptr, __i, __m); }

      value_type
      fetch_and(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_and(this->_M_ptr, __i, __m); }

      value_type
      fetch_or(value_type __i,
	       memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_or(this->_M_ptr, __i, __m); }

      value_type
      fetch_xor(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_xor(this->_M_ptr, __i, __m); }

#if __glibcxx_atomic_min_max
      value_type
      fetch_min(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_min(this->_M_ptr, __i, __m); }

      value_type
      fetch_max(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_max(this->_M_ptr, __i, __m); }
#endif // __glibcxx_atomic_min_max

      _GLIBCXX_ALWAYS_INLINE value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(this->_M_ptr, value_type(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(this->_M_ptr, value_type(1)); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch(this->_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch(this->_M_ptr, __i); }

      value_type
      operator&=(value_type __i) const noexcept
      { return __atomic_impl::__and_fetch(this->_M_ptr, __i); }

      value_type
      operator|=(value_type __i) const noexcept
      { return __atomic_impl::__or_fetch(this->_M_ptr, __i); }

      value_type
      operator^=(value_type __i) const noexcept
      { return __atomic_impl::__xor_fetch(this->_M_ptr, __i); }
    };
  template<typename _Tp>
    struct __atomic_ref<const _Tp, true, false, false>
    : __atomic_ref_base<const _Tp>
    {
      using difference_type
	= typename __atomic_ref_base<const _Tp>::value_type;
      using __atomic_ref_base<const _Tp>::__atomic_ref_base;
    };
  // Base class for atomic_ref<floating-point-type>.
  template<typename _Fp>
    struct __atomic_ref<_Fp, false, true, false>
    : __atomic_ref_base<_Fp>
    {
      using value_type = typename __atomic_ref_base<_Fp>::value_type;
      using difference_type = value_type;

      using __atomic_ref_base<_Fp>::__atomic_ref_base;
      using __atomic_ref_base<_Fp>::operator=;

      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_add_flt(this->_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_sub_flt(this->_M_ptr, __i, __m); }

#if __glibcxx_atomic_min_max
      value_type
      fetch_min(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_min(this->_M_ptr, __i, __m); }

      value_type
      fetch_max(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_max(this->_M_ptr, __i, __m); }
#endif // __glibcxx_atomic_min_max

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch_flt(this->_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch_flt(this->_M_ptr, __i); }
    };
  template<typename _Fp>
    struct __atomic_ref<const _Fp, false, true, false>
    : __atomic_ref_base<const _Fp>
    {
      using difference_type
	= typename __atomic_ref_base<const _Fp>::value_type;
      using __atomic_ref_base<const _Fp>::__atomic_ref_base;
    };
  // Base class for atomic_ref<pointer-type>.
  template<typename _Pt>
    struct __atomic_ref<_Pt, false, false, true>
    : __atomic_ref_base<_Pt>
    {
      using value_type = typename __atomic_ref_base<_Pt>::value_type;
      using difference_type = ptrdiff_t;

      using __atomic_ref_base<_Pt>::__atomic_ref_base;
      using __atomic_ref_base<_Pt>::operator=;

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_add(difference_type __d,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(this->_M_ptr, _S_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_sub(difference_type __d,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(this->_M_ptr, _S_type_size(__d), __m); }

      value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      {
	return __atomic_impl::__add_fetch(this->_M_ptr, _S_type_size(1));
      }

      value_type
      operator--() const noexcept
      {
	return __atomic_impl::__sub_fetch(this->_M_ptr, _S_type_size(1));
      }

      value_type
      operator+=(difference_type __d) const noexcept
      {
	return __atomic_impl::__add_fetch(this->_M_ptr, _S_type_size(__d));
      }

      value_type
      operator-=(difference_type __d) const noexcept
      {
	return __atomic_impl::__sub_fetch(this->_M_ptr, _S_type_size(__d));
      }

    private:
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) noexcept
      {
	using _Et = remove_pointer_t<value_type>;
	static_assert(is_object_v<_Et>);
	return __d * sizeof(_Et);
      }
    };
  template<typename _Pt>
    struct __atomic_ref<const _Pt, false, false, true>
    : __atomic_ref_base<const _Pt>
    {
      using difference_type = ptrdiff_t;
      using __atomic_ref_base<const _Pt>::__atomic_ref_base;
    };
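  // Example (illustrative sketch, not part of the upstream header; the
  // function name is hypothetical): std::atomic_ref in <atomic> is built
  // on the __atomic_ref specializations above and performs atomic
  // operations on an ordinary object that satisfies required_alignment
  // and is not concurrently accessed non-atomically.
  //
  //   #include <atomic>
  //
  //   inline void
  //   __bump_example(long& __counter)
  //   {
  //     std::atomic_ref<long> __ref(__counter);
  //     __ref.fetch_add(1, std::memory_order_relaxed);
  //   }
  //
  //   static_assert(std::atomic_ref<long>::required_alignment
  //                 >= alignof(long));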
#endif // C++20

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _GLIBCXX_ATOMIC_BASE_H