libstdc++
atomic_base.h
1// -*- C++ -*- header.
2
3// Copyright (C) 2008-2026 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/atomic_base.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{atomic}
28 */
29
30#ifndef _GLIBCXX_ATOMIC_BASE_H
31#define _GLIBCXX_ATOMIC_BASE_H 1
32
33#ifdef _GLIBCXX_SYSHDR
34#pragma GCC system_header
35#endif
36
37#include <bits/c++config.h>
38#include <new> // For placement new
40#include <bits/move.h>
41
42#if __cplusplus > 201703L && _GLIBCXX_HOSTED
43#include <bits/atomic_wait.h>
44#endif
45
46#ifndef _GLIBCXX_ALWAYS_INLINE
47#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
48#endif
49
50#include <bits/version.h>
51
52namespace std _GLIBCXX_VISIBILITY(default)
53{
54_GLIBCXX_BEGIN_NAMESPACE_VERSION
55
56 /**
57 * @defgroup atomics Atomics
58 *
59 * Components for performing atomic operations.
60 * @{
61 */
62
63 /// Enumeration for memory_order
64#if __cplusplus > 201703L
65 enum class memory_order : int
66 {
67 relaxed,
68 consume,
69 acquire,
70 release,
71 acq_rel,
72 seq_cst
73 };
74
75 inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
76 inline constexpr memory_order memory_order_consume = memory_order::consume;
77 inline constexpr memory_order memory_order_acquire = memory_order::acquire;
78 inline constexpr memory_order memory_order_release = memory_order::release;
79 inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
80 inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
81#else
82 enum memory_order : int
83 {
84 memory_order_relaxed,
85 memory_order_consume,
86 memory_order_acquire,
87 memory_order_release,
88 memory_order_acq_rel,
89 memory_order_seq_cst
90 };
91#endif
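The enumerators above map directly onto the compiler's __ATOMIC_* constants (the library passes int(__m) to the builtins). As a minimal illustration of how the orderings are meant to be paired — release on the writer, acquire on the reader — here is a sketch using the public std::atomic interface rather than this internal header; the function names are illustrative only:

#include <atomic>
#include <cassert>
#include <thread>

std::atomic<int>  data{0};
std::atomic<bool> ready{false};

void producer()
{
  data.store(42, std::memory_order_relaxed);
  ready.store(true, std::memory_order_release);   // publish
}

void consumer()
{
  while (!ready.load(std::memory_order_acquire))  // synchronizes with the release
    ;
  assert(data.load(std::memory_order_relaxed) == 42);
}

int main()
{
  std::thread t1(producer), t2(consumer);
  t1.join();
  t2.join();
}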
92
93 /// @cond undocumented
94 enum __memory_order_modifier
95 {
96 __memory_order_mask = 0x0ffff,
97 __memory_order_modifier_mask = 0xffff0000,
98 __memory_order_hle_acquire = 0x10000,
99 __memory_order_hle_release = 0x20000
100 };
101 /// @endcond
102
103 constexpr memory_order
104 operator|(memory_order __m, __memory_order_modifier __mod) noexcept
105 {
106 return memory_order(int(__m) | int(__mod));
107 }
108
109 constexpr memory_order
110 operator&(memory_order __m, __memory_order_modifier __mod) noexcept
111 {
112 return memory_order(int(__m) & int(__mod));
113 }
114
115 /// @cond undocumented
116
117 // Drop release ordering as per [atomics.types.operations.req]/21
118 constexpr memory_order
119 __cmpexch_failure_order2(memory_order __m) noexcept
120 {
121 return __m == memory_order_acq_rel ? memory_order_acquire
122 : __m == memory_order_release ? memory_order_relaxed : __m;
123 }
124
125 constexpr memory_order
126 __cmpexch_failure_order(memory_order __m) noexcept
127 {
128 return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
129 | __memory_order_modifier(__m & __memory_order_modifier_mask));
130 }
131
132 constexpr bool
133 __is_valid_cmpexch_failure_order(memory_order __m) noexcept
134 {
135 return (__m & __memory_order_mask) != memory_order_release
136 && (__m & __memory_order_mask) != memory_order_acq_rel;
137 }
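For illustration, the mapping implemented by __cmpexch_failure_order — used whenever compare_exchange_* is called with a single order — can be spelled out as compile-time checks. These use internal libstdc++ names purely to document the behaviour defined above; user code should not rely on them:

#include <atomic>

static_assert(std::__cmpexch_failure_order(std::memory_order_acq_rel)
	      == std::memory_order_acquire,
	      "acq_rel success order gives acquire on failure");
static_assert(std::__cmpexch_failure_order(std::memory_order_release)
	      == std::memory_order_relaxed,
	      "release success order gives relaxed on failure");
static_assert(std::__cmpexch_failure_order(std::memory_order_seq_cst)
	      == std::memory_order_seq_cst,
	      "other orders are passed through unchanged");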
138
139 // Base types for atomics.
140 template<typename _IntTp>
141 struct __atomic_base;
142
143 /// @endcond
144
145 _GLIBCXX_ALWAYS_INLINE void
146 atomic_thread_fence(memory_order __m) noexcept
147 { __atomic_thread_fence(int(__m)); }
148
149 _GLIBCXX_ALWAYS_INLINE void
150 atomic_signal_fence(memory_order __m) noexcept
151 { __atomic_signal_fence(int(__m)); }
152
153 /// kill_dependency
154 template<typename _Tp>
155 inline _Tp
156 kill_dependency(_Tp __y) noexcept
157 {
158 _Tp __ret(__y);
159 return __ret;
160 }
161
162/// @cond undocumented
163#if __glibcxx_atomic_value_initialization
164# define _GLIBCXX20_INIT(I) = I
165#else
166# define _GLIBCXX20_INIT(I)
167#endif
168/// @endcond
169
170#define ATOMIC_VAR_INIT(_VI) { _VI }
171
172 template<typename _Tp>
173 struct atomic;
174
175 template<typename _Tp>
176 struct atomic<_Tp*>;
177
178 /* The target's "set" value for test-and-set may not be exactly 1. */
179#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
180 typedef bool __atomic_flag_data_type;
181#else
182 typedef unsigned char __atomic_flag_data_type;
183#endif
184
185 /// @cond undocumented
186
187 /*
188 * Base type for atomic_flag.
189 *
190 * Base type is POD with data, allowing atomic_flag to derive from
191 * it and meet the standard layout type requirement. In addition to
192 * compatibility with a C interface, this allows different
193 * implementations of atomic_flag to use the same atomic operation
194 * functions, via a standard conversion to the __atomic_flag_base
195 * argument.
196 */
197 _GLIBCXX_BEGIN_EXTERN_C
198
199 struct __atomic_flag_base
200 {
201 __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
202 };
203
204 _GLIBCXX_END_EXTERN_C
205
206 /// @endcond
207
208#define ATOMIC_FLAG_INIT { 0 }
209
210 /// atomic_flag
211 struct atomic_flag : public __atomic_flag_base
212 {
213 atomic_flag() noexcept = default;
214 ~atomic_flag() noexcept = default;
215 atomic_flag(const atomic_flag&) = delete;
216 atomic_flag& operator=(const atomic_flag&) = delete;
217 atomic_flag& operator=(const atomic_flag&) volatile = delete;
218
219 // Conversion to ATOMIC_FLAG_INIT.
220 constexpr atomic_flag(bool __i) noexcept
221 : __atomic_flag_base{ _S_init(__i) }
222 { }
223
224 _GLIBCXX_ALWAYS_INLINE bool
225 test_and_set(memory_order __m = memory_order_seq_cst) noexcept
226 {
227 return __atomic_test_and_set (&_M_i, int(__m));
228 }
229
230 _GLIBCXX_ALWAYS_INLINE bool
231 test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
232 {
233 return __atomic_test_and_set (&_M_i, int(__m));
234 }
235
236#ifdef __glibcxx_atomic_flag_test // C++ >= 20
237 _GLIBCXX_ALWAYS_INLINE bool
238 test(memory_order __m = memory_order_seq_cst) const noexcept
239 {
240 __atomic_flag_data_type __v;
241 __atomic_load(&_M_i, &__v, int(__m));
242 return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
243 }
244
245 _GLIBCXX_ALWAYS_INLINE bool
246 test(memory_order __m = memory_order_seq_cst) const volatile noexcept
247 {
248 __atomic_flag_data_type __v;
249 __atomic_load(&_M_i, &__v, int(__m));
250 return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
251 }
252#endif
253
254#if __glibcxx_atomic_wait // C++ >= 20 && (linux_futex || gthread)
255 _GLIBCXX_ALWAYS_INLINE void
256 wait(bool __old,
257 memory_order __m = memory_order_seq_cst) const noexcept
258 {
259 const __atomic_flag_data_type __v
260 = __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;
261
262 std::__atomic_wait_address_v(&_M_i, __v,
263 [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
264 }
265
266 // TODO add const volatile overload
267
268 _GLIBCXX_ALWAYS_INLINE void
269 notify_one() noexcept
270 { std::__atomic_notify_address(&_M_i, false); }
271
272 // TODO add const volatile overload
273
274 _GLIBCXX_ALWAYS_INLINE void
275 notify_all() noexcept
276 { std::__atomic_notify_address(&_M_i, true); }
277
278 // TODO add const volatile overload
279#endif // __glibcxx_atomic_wait
280
281 _GLIBCXX_ALWAYS_INLINE void
282 clear(memory_order __m = memory_order_seq_cst) noexcept
283 {
284 memory_order __b __attribute__ ((__unused__))
285 = __m & __memory_order_mask;
286 __glibcxx_assert(__b != memory_order_consume);
287 __glibcxx_assert(__b != memory_order_acquire);
288 __glibcxx_assert(__b != memory_order_acq_rel);
289
290 __atomic_clear (&_M_i, int(__m));
291 }
292
293 _GLIBCXX_ALWAYS_INLINE void
294 clear(memory_order __m = memory_order_seq_cst) volatile noexcept
295 {
296 memory_order __b __attribute__ ((__unused__))
297 = __m & __memory_order_mask;
298 __glibcxx_assert(__b != memory_order_consume);
299 __glibcxx_assert(__b != memory_order_acquire);
300 __glibcxx_assert(__b != memory_order_acq_rel);
301
302 __atomic_clear (&_M_i, int(__m));
303 }
304
305 private:
306 static constexpr __atomic_flag_data_type
307 _S_init(bool __i)
308 { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
309 };
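A classic use of the public atomic_flag interface declared above is a spinlock. The sketch below assumes C++20, so that default construction clears the flag and wait/notify are available; the class name is illustrative, not part of the library:

#include <atomic>

class spin_lock
{
  std::atomic_flag _flag;   // value-initialized to the clear state since C++20

public:
  void lock() noexcept
  {
    // test_and_set returns the previous value: true means someone holds the lock.
    while (_flag.test_and_set(std::memory_order_acquire))
      _flag.wait(true, std::memory_order_relaxed);   // block until cleared
  }

  void unlock() noexcept
  {
    _flag.clear(std::memory_order_release);
    _flag.notify_one();
  }
};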
310
311 /// @cond undocumented
312
313 /// Base class for atomic integrals.
314 //
315 // For each of the integral types, define atomic_[integral type] struct
316 //
317 // atomic_bool bool
318 // atomic_char char
319 // atomic_schar signed char
320 // atomic_uchar unsigned char
321 // atomic_short short
322 // atomic_ushort unsigned short
323 // atomic_int int
324 // atomic_uint unsigned int
325 // atomic_long long
326 // atomic_ulong unsigned long
327 // atomic_llong long long
328 // atomic_ullong unsigned long long
329 // atomic_char8_t char8_t
330 // atomic_char16_t char16_t
331 // atomic_char32_t char32_t
332 // atomic_wchar_t wchar_t
333 //
334 // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
335 // 8 bytes, since that is what GCC built-in functions for atomic
336 // memory access expect.
337
338 namespace __atomic_impl
339 {
340 template<typename _Tp>
341 using _Val = typename remove_volatile<_Tp>::type;
342
343#if __glibcxx_atomic_min_max
344 template<typename _Tp>
345 _Tp
346 __fetch_min(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept;
347
348 template<typename _Tp>
349 _Tp
350 __fetch_max(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept;
351#endif
352 }
353
354 template<typename _ITp>
355 struct __atomic_base
356 {
357 using value_type = _ITp;
358 using difference_type = value_type;
359
360 private:
361 typedef _ITp __int_type;
362
363 static constexpr int _S_alignment =
364 sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);
365
366 alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);
367
368 public:
369 __atomic_base() noexcept = default;
370 ~__atomic_base() noexcept = default;
371 __atomic_base(const __atomic_base&) = delete;
372 __atomic_base& operator=(const __atomic_base&) = delete;
373 __atomic_base& operator=(const __atomic_base&) volatile = delete;
374
375 constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }
376
377 operator __int_type() const noexcept
378 { return load(); }
379
380 operator __int_type() const volatile noexcept
381 { return load(); }
382
383 __int_type
384 operator=(__int_type __i) noexcept
385 {
386 store(__i);
387 return __i;
388 }
389
390 __int_type
391 operator=(__int_type __i) volatile noexcept
392 {
393 store(__i);
394 return __i;
395 }
396
397 __int_type
398 operator++(int) noexcept
399 { return fetch_add(1); }
400
401 __int_type
402 operator++(int) volatile noexcept
403 { return fetch_add(1); }
404
405 __int_type
406 operator--(int) noexcept
407 { return fetch_sub(1); }
408
409 __int_type
410 operator--(int) volatile noexcept
411 { return fetch_sub(1); }
412
413 __int_type
414 operator++() noexcept
415 { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
416
417 __int_type
418 operator++() volatile noexcept
419 { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
420
421 __int_type
422 operator--() noexcept
423 { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
424
425 __int_type
426 operator--() volatile noexcept
427 { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
428
429 __int_type
430 operator+=(__int_type __i) noexcept
431 { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
432
433 __int_type
434 operator+=(__int_type __i) volatile noexcept
435 { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
436
437 __int_type
438 operator-=(__int_type __i) noexcept
439 { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
440
441 __int_type
442 operator-=(__int_type __i) volatile noexcept
443 { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
444
445 __int_type
446 operator&=(__int_type __i) noexcept
447 { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
448
449 __int_type
450 operator&=(__int_type __i) volatile noexcept
451 { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
452
453 __int_type
454 operator|=(__int_type __i) noexcept
455 { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
456
457 __int_type
458 operator|=(__int_type __i) volatile noexcept
459 { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
460
461 __int_type
462 operator^=(__int_type __i) noexcept
463 { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
464
465 __int_type
466 operator^=(__int_type __i) volatile noexcept
467 { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
468
469 bool
470 is_lock_free() const noexcept
471 {
472 // Use a fake, minimally aligned pointer.
473 return __atomic_is_lock_free(sizeof(_M_i),
474 reinterpret_cast<void *>(-_S_alignment));
475 }
476
477 bool
478 is_lock_free() const volatile noexcept
479 {
480 // Use a fake, minimally aligned pointer.
481 return __atomic_is_lock_free(sizeof(_M_i),
482 reinterpret_cast<void *>(-_S_alignment));
483 }
484
485 _GLIBCXX_ALWAYS_INLINE void
486 store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
487 {
488 memory_order __b __attribute__ ((__unused__))
489 = __m & __memory_order_mask;
490 __glibcxx_assert(__b != memory_order_acquire);
491 __glibcxx_assert(__b != memory_order_acq_rel);
492 __glibcxx_assert(__b != memory_order_consume);
493
494 __atomic_store_n(&_M_i, __i, int(__m));
495 }
496
497 _GLIBCXX_ALWAYS_INLINE void
498 store(__int_type __i,
499 memory_order __m = memory_order_seq_cst) volatile noexcept
500 {
501 memory_order __b __attribute__ ((__unused__))
502 = __m & __memory_order_mask;
503 __glibcxx_assert(__b != memory_order_acquire);
504 __glibcxx_assert(__b != memory_order_acq_rel);
505 __glibcxx_assert(__b != memory_order_consume);
506
507 __atomic_store_n(&_M_i, __i, int(__m));
508 }
509
510 _GLIBCXX_ALWAYS_INLINE __int_type
511 load(memory_order __m = memory_order_seq_cst) const noexcept
512 {
513 memory_order __b __attribute__ ((__unused__))
514 = __m & __memory_order_mask;
515 __glibcxx_assert(__b != memory_order_release);
516 __glibcxx_assert(__b != memory_order_acq_rel);
517
518 return __atomic_load_n(&_M_i, int(__m));
519 }
520
521 _GLIBCXX_ALWAYS_INLINE __int_type
522 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
523 {
524 memory_order __b __attribute__ ((__unused__))
525 = __m & __memory_order_mask;
526 __glibcxx_assert(__b != memory_order_release);
527 __glibcxx_assert(__b != memory_order_acq_rel);
528
529 return __atomic_load_n(&_M_i, int(__m));
530 }
531
532 _GLIBCXX_ALWAYS_INLINE __int_type
533 exchange(__int_type __i,
534 memory_order __m = memory_order_seq_cst) noexcept
535 {
536 return __atomic_exchange_n(&_M_i, __i, int(__m));
537 }
538
539
540 _GLIBCXX_ALWAYS_INLINE __int_type
541 exchange(__int_type __i,
542 memory_order __m = memory_order_seq_cst) volatile noexcept
543 {
544 return __atomic_exchange_n(&_M_i, __i, int(__m));
545 }
546
547 _GLIBCXX_ALWAYS_INLINE bool
548 compare_exchange_weak(__int_type& __i1, __int_type __i2,
549 memory_order __m1, memory_order __m2) noexcept
550 {
551 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
552
553 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
554 int(__m1), int(__m2));
555 }
556
557 _GLIBCXX_ALWAYS_INLINE bool
558 compare_exchange_weak(__int_type& __i1, __int_type __i2,
559 memory_order __m1,
560 memory_order __m2) volatile noexcept
561 {
562 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
563
564 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
565 int(__m1), int(__m2));
566 }
567
568 _GLIBCXX_ALWAYS_INLINE bool
569 compare_exchange_weak(__int_type& __i1, __int_type __i2,
570 memory_order __m = memory_order_seq_cst) noexcept
571 {
572 return compare_exchange_weak(__i1, __i2, __m,
573 __cmpexch_failure_order(__m));
574 }
575
576 _GLIBCXX_ALWAYS_INLINE bool
577 compare_exchange_weak(__int_type& __i1, __int_type __i2,
578 memory_order __m = memory_order_seq_cst) volatile noexcept
579 {
580 return compare_exchange_weak(__i1, __i2, __m,
581 __cmpexch_failure_order(__m));
582 }
583
584 _GLIBCXX_ALWAYS_INLINE bool
585 compare_exchange_strong(__int_type& __i1, __int_type __i2,
586 memory_order __m1, memory_order __m2) noexcept
587 {
588 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
589
590 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
591 int(__m1), int(__m2));
592 }
593
594 _GLIBCXX_ALWAYS_INLINE bool
595 compare_exchange_strong(__int_type& __i1, __int_type __i2,
596 memory_order __m1,
597 memory_order __m2) volatile noexcept
598 {
599 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
600
601 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
602 int(__m1), int(__m2));
603 }
604
605 _GLIBCXX_ALWAYS_INLINE bool
606 compare_exchange_strong(__int_type& __i1, __int_type __i2,
607 memory_order __m = memory_order_seq_cst) noexcept
608 {
609 return compare_exchange_strong(__i1, __i2, __m,
610 __cmpexch_failure_order(__m));
611 }
612
613 _GLIBCXX_ALWAYS_INLINE bool
614 compare_exchange_strong(__int_type& __i1, __int_type __i2,
615 memory_order __m = memory_order_seq_cst) volatile noexcept
616 {
617 return compare_exchange_strong(__i1, __i2, __m,
618 __cmpexch_failure_order(__m));
619 }
620
621#if __glibcxx_atomic_wait
622 _GLIBCXX_ALWAYS_INLINE void
623 wait(__int_type __old,
624 memory_order __m = memory_order_seq_cst) const noexcept
625 {
626 std::__atomic_wait_address_v(&_M_i, __old,
627 [__m, this] { return this->load(__m); });
628 }
629
630 // TODO add const volatile overload
631
632 _GLIBCXX_ALWAYS_INLINE void
633 notify_one() noexcept
634 { std::__atomic_notify_address(&_M_i, false); }
635
636 // TODO add const volatile overload
637
638 _GLIBCXX_ALWAYS_INLINE void
639 notify_all() noexcept
640 { std::__atomic_notify_address(&_M_i, true); }
641
642 // TODO add const volatile overload
643#endif // __glibcxx_atomic_wait
644
645 _GLIBCXX_ALWAYS_INLINE __int_type
646 fetch_add(__int_type __i,
647 memory_order __m = memory_order_seq_cst) noexcept
648 { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
649
650 _GLIBCXX_ALWAYS_INLINE __int_type
651 fetch_add(__int_type __i,
652 memory_order __m = memory_order_seq_cst) volatile noexcept
653 { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
654
655 _GLIBCXX_ALWAYS_INLINE __int_type
656 fetch_sub(__int_type __i,
657 memory_order __m = memory_order_seq_cst) noexcept
658 { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
659
660 _GLIBCXX_ALWAYS_INLINE __int_type
661 fetch_sub(__int_type __i,
662 memory_order __m = memory_order_seq_cst) volatile noexcept
663 { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
664
665 _GLIBCXX_ALWAYS_INLINE __int_type
666 fetch_and(__int_type __i,
667 memory_order __m = memory_order_seq_cst) noexcept
668 { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
669
670 _GLIBCXX_ALWAYS_INLINE __int_type
671 fetch_and(__int_type __i,
672 memory_order __m = memory_order_seq_cst) volatile noexcept
673 { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
674
675 _GLIBCXX_ALWAYS_INLINE __int_type
676 fetch_or(__int_type __i,
677 memory_order __m = memory_order_seq_cst) noexcept
678 { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
679
680 _GLIBCXX_ALWAYS_INLINE __int_type
681 fetch_or(__int_type __i,
682 memory_order __m = memory_order_seq_cst) volatile noexcept
683 { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
684
685 _GLIBCXX_ALWAYS_INLINE __int_type
686 fetch_xor(__int_type __i,
687 memory_order __m = memory_order_seq_cst) noexcept
688 { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
689
690 _GLIBCXX_ALWAYS_INLINE __int_type
691 fetch_xor(__int_type __i,
692 memory_order __m = memory_order_seq_cst) volatile noexcept
693 { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
694
695#if __glibcxx_atomic_min_max
696 _GLIBCXX_ALWAYS_INLINE __int_type
697 fetch_min(__int_type __i,
698 memory_order __m = memory_order_seq_cst) noexcept
699 { return __atomic_impl::__fetch_min(&_M_i, __i, __m); }
700
701 _GLIBCXX_ALWAYS_INLINE __int_type
702 fetch_min(__int_type __i,
703 memory_order __m = memory_order_seq_cst) volatile noexcept
704 { return __atomic_impl::__fetch_min(&_M_i, __i, __m); }
705
706 _GLIBCXX_ALWAYS_INLINE __int_type
707 fetch_max(__int_type __i,
708 memory_order __m = memory_order_seq_cst) noexcept
709 { return __atomic_impl::__fetch_max(&_M_i, __i, __m); }
710
711 _GLIBCXX_ALWAYS_INLINE __int_type
712 fetch_max(__int_type __i,
713 memory_order __m = memory_order_seq_cst) volatile noexcept
714 { return __atomic_impl::__fetch_max(&_M_i, __i, __m); }
715#endif
716 };
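__atomic_base<_ITp> is the implementation behind std::atomic for integral types, so every member above is reachable through the public interface; a brief sketch:

#include <atomic>
#include <cassert>

int main()
{
  std::atomic<unsigned> counter{0};

  counter.fetch_add(2, std::memory_order_relaxed);  // __atomic_fetch_add
  counter |= 0x10;                                  // __atomic_or_fetch, seq_cst

  unsigned expected = 0x12;
  // Single-order form: the failure order is derived via __cmpexch_failure_order.
  bool ok = counter.compare_exchange_strong(expected, 100u);
  assert(ok && counter.load() == 100u);
}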
717
718
719 /// Partial specialization for pointer types.
720 template<typename _PTp>
721 struct __atomic_base<_PTp*>
722 {
723 private:
724 typedef _PTp* __pointer_type;
725
726 __pointer_type _M_p _GLIBCXX20_INIT(nullptr);
727
728 static constexpr ptrdiff_t
729 _S_type_size(ptrdiff_t __d)
730 { return __d * sizeof(_PTp); }
731
732 public:
733 __atomic_base() noexcept = default;
734 ~__atomic_base() noexcept = default;
735 __atomic_base(const __atomic_base&) = delete;
736 __atomic_base& operator=(const __atomic_base&) = delete;
737 __atomic_base& operator=(const __atomic_base&) volatile = delete;
738
739 // Requires __pointer_type convertible to _M_p.
740 constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }
741
742 operator __pointer_type() const noexcept
743 { return load(); }
744
745 operator __pointer_type() const volatile noexcept
746 { return load(); }
747
748 __pointer_type
749 operator=(__pointer_type __p) noexcept
750 {
751 store(__p);
752 return __p;
753 }
754
755 __pointer_type
756 operator=(__pointer_type __p) volatile noexcept
757 {
758 store(__p);
759 return __p;
760 }
761
762 __pointer_type
763 operator++(int) noexcept
764 { return fetch_add(1); }
765
766 __pointer_type
767 operator++(int) volatile noexcept
768 { return fetch_add(1); }
769
770 __pointer_type
771 operator--(int) noexcept
772 { return fetch_sub(1); }
773
774 __pointer_type
775 operator--(int) volatile noexcept
776 { return fetch_sub(1); }
777
778 __pointer_type
779 operator++() noexcept
780 { return __atomic_add_fetch(&_M_p, _S_type_size(1),
781 int(memory_order_seq_cst)); }
782
783 __pointer_type
784 operator++() volatile noexcept
785 { return __atomic_add_fetch(&_M_p, _S_type_size(1),
786 int(memory_order_seq_cst)); }
787
788 __pointer_type
789 operator--() noexcept
790 { return __atomic_sub_fetch(&_M_p, _S_type_size(1),
791 int(memory_order_seq_cst)); }
792
793 __pointer_type
794 operator--() volatile noexcept
795 { return __atomic_sub_fetch(&_M_p, _S_type_size(1),
796 int(memory_order_seq_cst)); }
797
798 __pointer_type
799 operator+=(ptrdiff_t __d) noexcept
800 { return __atomic_add_fetch(&_M_p, _S_type_size(__d),
801 int(memory_order_seq_cst)); }
802
803 __pointer_type
804 operator+=(ptrdiff_t __d) volatile noexcept
805 { return __atomic_add_fetch(&_M_p, _S_type_size(__d),
806 int(memory_order_seq_cst)); }
807
808 __pointer_type
809 operator-=(ptrdiff_t __d) noexcept
810 { return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
811 int(memory_order_seq_cst)); }
812
813 __pointer_type
814 operator-=(ptrdiff_t __d) volatile noexcept
815 { return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
816 int(memory_order_seq_cst)); }
817
818 bool
819 is_lock_free() const noexcept
820 {
821 // Produce a fake, minimally aligned pointer.
822 return __atomic_is_lock_free(sizeof(_M_p),
823 reinterpret_cast<void *>(-__alignof(_M_p)));
824 }
825
826 bool
827 is_lock_free() const volatile noexcept
828 {
829 // Produce a fake, minimally aligned pointer.
830 return __atomic_is_lock_free(sizeof(_M_p),
831 reinterpret_cast<void *>(-__alignof(_M_p)));
832 }
833
834 _GLIBCXX_ALWAYS_INLINE void
835 store(__pointer_type __p,
836 memory_order __m = memory_order_seq_cst) noexcept
837 {
838 memory_order __b __attribute__ ((__unused__))
839 = __m & __memory_order_mask;
840
841 __glibcxx_assert(__b != memory_order_acquire);
842 __glibcxx_assert(__b != memory_order_acq_rel);
843 __glibcxx_assert(__b != memory_order_consume);
844
845 __atomic_store_n(&_M_p, __p, int(__m));
846 }
847
848 _GLIBCXX_ALWAYS_INLINE void
849 store(__pointer_type __p,
850 memory_order __m = memory_order_seq_cst) volatile noexcept
851 {
852 memory_order __b __attribute__ ((__unused__))
853 = __m & __memory_order_mask;
854 __glibcxx_assert(__b != memory_order_acquire);
855 __glibcxx_assert(__b != memory_order_acq_rel);
856 __glibcxx_assert(__b != memory_order_consume);
857
858 __atomic_store_n(&_M_p, __p, int(__m));
859 }
860
861 _GLIBCXX_ALWAYS_INLINE __pointer_type
862 load(memory_order __m = memory_order_seq_cst) const noexcept
863 {
864 memory_order __b __attribute__ ((__unused__))
865 = __m & __memory_order_mask;
866 __glibcxx_assert(__b != memory_order_release);
867 __glibcxx_assert(__b != memory_order_acq_rel);
868
869 return __atomic_load_n(&_M_p, int(__m));
870 }
871
872 _GLIBCXX_ALWAYS_INLINE __pointer_type
873 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
874 {
875 memory_order __b __attribute__ ((__unused__))
876 = __m & __memory_order_mask;
877 __glibcxx_assert(__b != memory_order_release);
878 __glibcxx_assert(__b != memory_order_acq_rel);
879
880 return __atomic_load_n(&_M_p, int(__m));
881 }
882
883 _GLIBCXX_ALWAYS_INLINE __pointer_type
884 exchange(__pointer_type __p,
885 memory_order __m = memory_order_seq_cst) noexcept
886 {
887 return __atomic_exchange_n(&_M_p, __p, int(__m));
888 }
889
890
891 _GLIBCXX_ALWAYS_INLINE __pointer_type
892 exchange(__pointer_type __p,
893 memory_order __m = memory_order_seq_cst) volatile noexcept
894 {
895 return __atomic_exchange_n(&_M_p, __p, int(__m));
896 }
897
898 _GLIBCXX_ALWAYS_INLINE bool
899 compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
900 memory_order __m1,
901 memory_order __m2) noexcept
902 {
903 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
904
905 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
906 int(__m1), int(__m2));
907 }
908
909 _GLIBCXX_ALWAYS_INLINE bool
910 compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
911 memory_order __m1,
912 memory_order __m2) volatile noexcept
913 {
914 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
915
916 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
917 int(__m1), int(__m2));
918 }
919
920 _GLIBCXX_ALWAYS_INLINE bool
921 compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
922 memory_order __m1,
923 memory_order __m2) noexcept
924 {
925 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
926
927 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
928 int(__m1), int(__m2));
929 }
930
931 _GLIBCXX_ALWAYS_INLINE bool
932 compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
933 memory_order __m1,
934 memory_order __m2) volatile noexcept
935 {
936 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
937
938 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
939 int(__m1), int(__m2));
940 }
941
942#if __glibcxx_atomic_wait
943 _GLIBCXX_ALWAYS_INLINE void
944 wait(__pointer_type __old,
945 memory_order __m = memory_order_seq_cst) const noexcept
946 {
947 std::__atomic_wait_address_v(&_M_p, __old,
948 [__m, this]
949 { return this->load(__m); });
950 }
951
952 // TODO add const volatile overload
953
954 _GLIBCXX_ALWAYS_INLINE void
955 notify_one() const noexcept
956 { std::__atomic_notify_address(&_M_p, false); }
957
958 // TODO add const volatile overload
959
960 _GLIBCXX_ALWAYS_INLINE void
961 notify_all() const noexcept
962 { std::__atomic_notify_address(&_M_p, true); }
963
964 // TODO add const volatile overload
965#endif // __glibcxx_atomic_wait
966
967 _GLIBCXX_ALWAYS_INLINE __pointer_type
968 fetch_add(ptrdiff_t __d,
969 memory_order __m = memory_order_seq_cst) noexcept
970 { return __atomic_fetch_add(&_M_p, _S_type_size(__d), int(__m)); }
971
972 _GLIBCXX_ALWAYS_INLINE __pointer_type
973 fetch_add(ptrdiff_t __d,
974 memory_order __m = memory_order_seq_cst) volatile noexcept
975 { return __atomic_fetch_add(&_M_p, _S_type_size(__d), int(__m)); }
976
977 _GLIBCXX_ALWAYS_INLINE __pointer_type
978 fetch_sub(ptrdiff_t __d,
979 memory_order __m = memory_order_seq_cst) noexcept
980 { return __atomic_fetch_sub(&_M_p, _S_type_size(__d), int(__m)); }
981
982 _GLIBCXX_ALWAYS_INLINE __pointer_type
983 fetch_sub(ptrdiff_t __d,
984 memory_order __m = memory_order_seq_cst) volatile noexcept
985 { return __atomic_fetch_sub(&_M_p, _S_type_size(__d), int(__m)); }
986 };
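The pointer specialization scales every increment by sizeof(_PTp) through _S_type_size, so std::atomic<T*> arithmetic advances element-wise like ordinary pointer arithmetic; a sketch:

#include <atomic>
#include <cassert>

int main()
{
  int buf[4] = {1, 2, 3, 4};
  std::atomic<int*> p{buf};

  int* old = p.fetch_add(2);   // advances by 2 * sizeof(int) bytes
  assert(old == buf && p.load() == buf + 2);

  ++p;                         // pre-increment moves one element forward
  assert(p.load() == buf + 3);
}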
987
988 namespace __atomic_impl
989 {
990 // Implementation details of atomic padding handling
991
992 template<typename _Tp>
993 constexpr bool
994 __maybe_has_padding()
995 {
996#if ! __has_builtin(__builtin_clear_padding)
997 return false;
998#elif __has_builtin(__has_unique_object_representations)
999 return !__has_unique_object_representations(_Tp)
1000 && !is_same<_Tp, float>::value && !is_same<_Tp, double>::value;
1001#else
1002 return true;
1003#endif
1004 }
1005
1006 template<typename _Tp>
1007 _GLIBCXX_ALWAYS_INLINE _GLIBCXX14_CONSTEXPR _Tp*
1008 __clear_padding(_Tp& __val) noexcept
1009 {
1010 auto* __ptr = std::__addressof(__val);
1011#if __has_builtin(__builtin_clear_padding)
1012 if _GLIBCXX17_CONSTEXPR (__atomic_impl::__maybe_has_padding<_Tp>())
1013 __builtin_clear_padding(__ptr);
1014#endif
1015 return __ptr;
1016 }
1017
1018#pragma GCC diagnostic push
1019#pragma GCC diagnostic ignored "-Wc++17-extensions"
1020
1021 template<bool _AtomicRef = false, typename _Tp>
1022 _GLIBCXX_ALWAYS_INLINE bool
1023 __compare_exchange(_Tp& __val, _Val<_Tp>& __e, _Val<_Tp>& __i,
1024 bool __is_weak,
1025 memory_order __s, memory_order __f) noexcept
1026 {
1027 __glibcxx_assert(__is_valid_cmpexch_failure_order(__f));
1028
1029 using _Vp = _Val<_Tp>;
1030 _Tp* const __pval = std::__addressof(__val);
1031
1032 if constexpr (!__atomic_impl::__maybe_has_padding<_Vp>())
1033 {
1034 return __atomic_compare_exchange(__pval, std::__addressof(__e),
1035 std::__addressof(__i), __is_weak,
1036 int(__s), int(__f));
1037 }
1038 else if constexpr (!_AtomicRef) // std::atomic<T>
1039 {
1040 // Clear padding of the value we want to set:
1041 _Vp* const __pi = __atomic_impl::__clear_padding(__i);
1042 // Only allowed to modify __e on failure, so make a copy:
1043 _Vp __exp = __e;
1044 // Clear padding of the expected value:
1045 _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);
1046
1047 // For std::atomic<T> we know that the contained value will already
1048 // have zeroed padding, so trivial memcmp semantics are OK.
1049 if (__atomic_compare_exchange(__pval, __pexp, __pi,
1050 __is_weak, int(__s), int(__f)))
1051 return true;
1052 // Value bits must be different, copy from __exp back to __e:
1053 __builtin_memcpy(std::__addressof(__e), __pexp, sizeof(_Vp));
1054 return false;
1055 }
1056 else // std::atomic_ref<T> where T has padding bits.
1057 {
1058 // Clear padding of the value we want to set:
1059 _Vp* const __pi = __atomic_impl::__clear_padding(__i);
1060
1061 // Only allowed to modify __e on failure, so make a copy:
1062 _Vp __exp = __e;
1063 // Optimistically assume that a previous store had zeroed padding
1064 // so that zeroing it in the expected value will match first time.
1065 _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);
1066
1067 // compare_exchange is specified to compare value representations.
1068 // Need to check whether a failure is 'real' or just due to
1069 // differences in padding bits. This loop should run no more than
1070 // three times, because the worst case scenario is:
1071 // First CAS fails because the actual value has non-zero padding.
1072 // Second CAS fails because another thread stored the same value,
1073 // but now with padding cleared. Third CAS succeeds.
1074 // We will never need to loop a fourth time, because any value
1075 // written by another thread (whether via store, exchange or
1076 // compare_exchange) will have had its padding cleared.
1077 while (true)
1078 {
1079 // Copy of the expected value so we can clear its padding.
1080 _Vp __orig = __exp;
1081
1082 if (__atomic_compare_exchange(__pval, __pexp, __pi,
1083 __is_weak, int(__s), int(__f)))
1084 return true;
1085
1086 // Copy of the actual value so we can clear its padding.
1087 _Vp __curr = __exp;
1088
1089 // Compare value representations (i.e. ignoring padding).
1090 if (__builtin_memcmp(__atomic_impl::__clear_padding(__orig),
1091 __atomic_impl::__clear_padding(__curr),
1092 sizeof(_Vp)))
1093 {
1094 // Value representations compare unequal, real failure.
1095 __builtin_memcpy(std::__addressof(__e), __pexp,
1096 sizeof(_Vp));
1097 return false;
1098 }
1099 }
1100 }
1101 }
1102#pragma GCC diagnostic pop
1103 } // namespace __atomic_impl
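The padding handling above exists so that compare_exchange compares value representations, as required since C++20 (P0528): padding bytes in either the stored or the expected value must not cause spurious mismatches. A sketch of the situation it covers, using the public interface (the struct name is illustrative):

#include <atomic>
#include <cassert>

struct Padded { char c; int i; };   // typically has padding bytes between c and i

int main()
{
  std::atomic<Padded> a{{'x', 1}};

  Padded expected{'x', 1};
  // The padding bytes of both the expected and the desired value are cleared
  // by __compare_exchange, so only the value representation is compared and
  // this succeeds even if the padding bytes happen to differ in memory.
  bool ok = a.compare_exchange_strong(expected, Padded{'y', 2});
  assert(ok);
}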
1104
1105#if __cplusplus > 201703L
1106 // Implementation details of atomic_ref and atomic<floating-point>.
1107 namespace __atomic_impl
1108 {
1109 // Like _Val<T> above, but for difference_type arguments.
1110 template<typename _Tp>
1111 using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;
1112
1113 template<size_t _Size, size_t _Align>
1114 _GLIBCXX_ALWAYS_INLINE bool
1115 is_lock_free() noexcept
1116 {
1117 // Produce a fake, minimally aligned pointer.
1118 return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
1119 }
1120
1121 template<typename _Tp>
1122 _GLIBCXX_ALWAYS_INLINE void
1123 store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
1124 {
1125 __atomic_store(__ptr, __atomic_impl::__clear_padding(__t), int(__m));
1126 }
1127
1128 template<typename _Tp>
1129 _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
1130 load(const _Tp* __ptr, memory_order __m) noexcept
1131 {
1132 alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
1133 auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
1134 __atomic_load(__ptr, __dest, int(__m));
1135 return *__dest;
1136 }
1137
1138 template<typename _Tp>
1139 _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
1140 exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
1141 {
1142 alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
1143 auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
1144 __atomic_exchange(__ptr, __atomic_impl::__clear_padding(__desired),
1145 __dest, int(__m));
1146 return *__dest;
1147 }
1148
1149 template<bool _AtomicRef = false, typename _Tp>
1150 _GLIBCXX_ALWAYS_INLINE bool
1151 compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
1152 _Val<_Tp> __desired, memory_order __success,
1153 memory_order __failure) noexcept
1154 {
1155 return __atomic_impl::__compare_exchange<_AtomicRef>(
1156 *__ptr, __expected, __desired, true, __success, __failure);
1157 }
1158
1159 template<bool _AtomicRef = false, typename _Tp>
1160 _GLIBCXX_ALWAYS_INLINE bool
1161 compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
1162 _Val<_Tp> __desired, memory_order __success,
1163 memory_order __failure) noexcept
1164 {
1165 return __atomic_impl::__compare_exchange<_AtomicRef>(
1166 *__ptr, __expected, __desired, false, __success, __failure);
1167 }
1168
1169#if __glibcxx_atomic_wait
1170 template<typename _Tp>
1171 _GLIBCXX_ALWAYS_INLINE void
1172 wait(const _Tp* __ptr, _Val<_Tp> __old,
1173 memory_order __m = memory_order_seq_cst) noexcept
1174 {
1175 std::__atomic_wait_address_v(__ptr, __old,
1176 [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
1177 }
1178
1179 // TODO add const volatile overload
1180
1181 template<typename _Tp>
1182 _GLIBCXX_ALWAYS_INLINE void
1183 notify_one(const _Tp* __ptr) noexcept
1184 { std::__atomic_notify_address(__ptr, false); }
1185
1186 // TODO add const volatile overload
1187
1188 template<typename _Tp>
1189 _GLIBCXX_ALWAYS_INLINE void
1190 notify_all(const _Tp* __ptr) noexcept
1191 { std::__atomic_notify_address(__ptr, true); }
1192
1193 // TODO add const volatile overload
1194#endif // __glibcxx_atomic_wait
1195
1196 template<typename _Tp>
1197 _GLIBCXX_ALWAYS_INLINE _Tp
1198 fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
1199 { return __atomic_fetch_add(__ptr, __i, int(__m)); }
1200
1201 template<typename _Tp>
1202 _GLIBCXX_ALWAYS_INLINE _Tp
1203 fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
1204 { return __atomic_fetch_sub(__ptr, __i, int(__m)); }
1205
1206 template<typename _Tp>
1207 _GLIBCXX_ALWAYS_INLINE _Tp
1208 fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1209 { return __atomic_fetch_and(__ptr, __i, int(__m)); }
1210
1211 template<typename _Tp>
1212 _GLIBCXX_ALWAYS_INLINE _Tp
1213 fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1214 { return __atomic_fetch_or(__ptr, __i, int(__m)); }
1215
1216 template<typename _Tp>
1217 _GLIBCXX_ALWAYS_INLINE _Tp
1218 fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1219 { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
1220
1221 template<typename _Tp>
1222 _GLIBCXX_ALWAYS_INLINE _Tp
1223 __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
1224 { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1225
1226 template<typename _Tp>
1227 _GLIBCXX_ALWAYS_INLINE _Tp
1228 __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
1229 { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1230
1231 template<typename _Tp>
1232 _GLIBCXX_ALWAYS_INLINE _Tp
1233 __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1234 { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1235
1236 template<typename _Tp>
1237 _GLIBCXX_ALWAYS_INLINE _Tp
1238 __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1239 { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1240
1241 template<typename _Tp>
1242 _GLIBCXX_ALWAYS_INLINE _Tp
1243 __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1244 { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1245
1246 template<typename _Tp>
1247 concept __atomic_fetch_addable
1248 = requires (_Tp __t) { __atomic_fetch_add(&__t, __t, 0); };
1249
1250 template<typename _Tp>
1251 _Tp
1252 __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1253 {
1254 if constexpr (__atomic_fetch_addable<_Tp>)
1255 return __atomic_fetch_add(__ptr, __i, int(__m));
1256 else
1257 {
1258 _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1259 _Val<_Tp> __newval = __oldval + __i;
1260 while (!compare_exchange_weak (__ptr, __oldval, __newval, __m,
1261 memory_order_relaxed))
1262 __newval = __oldval + __i;
1263 return __oldval;
1264 }
1265 }
1266
1267 template<typename _Tp>
1268 concept __atomic_fetch_subtractable
1269 = requires (_Tp __t) { __atomic_fetch_sub(&__t, __t, 0); };
1270
1271 template<typename _Tp>
1272 _Tp
1273 __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1274 {
1275 if constexpr (__atomic_fetch_subtractable<_Tp>)
1276 return __atomic_fetch_sub(__ptr, __i, int(__m));
1277 else
1278 {
1279 _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1280 _Val<_Tp> __newval = __oldval - __i;
1281 while (!compare_exchange_weak (__ptr, __oldval, __newval, __m,
1282 memory_order_relaxed))
1283 __newval = __oldval - __i;
1284 return __oldval;
1285 }
1286 }
1287
1288 template<typename _Tp>
1289 concept __atomic_add_fetchable
1290 = requires (_Tp __t) { __atomic_add_fetch(&__t, __t, 0); };
1291
1292 template<typename _Tp>
1293 _Tp
1294 __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
1295 {
1296 if constexpr (__atomic_add_fetchable<_Tp>)
1297 return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST);
1298 else
1299 {
1300 _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1301 _Val<_Tp> __newval = __oldval + __i;
1302 while (!compare_exchange_weak (__ptr, __oldval, __newval,
1303 memory_order_seq_cst,
1304 memory_order_relaxed))
1305 __newval = __oldval + __i;
1306 return __newval;
1307 }
1308 }
1309
1310 template<typename _Tp>
1311 concept __atomic_sub_fetchable
1312 = requires (_Tp __t) { __atomic_sub_fetch(&__t, __t, 0); };
1313
1314 template<typename _Tp>
1315 _Tp
1316 __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
1317 {
1318 if constexpr (__atomic_sub_fetchable<_Tp>)
1319 return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST);
1320 else
1321 {
1322 _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1323 _Val<_Tp> __newval = __oldval - __i;
1324 while (!compare_exchange_weak (__ptr, __oldval, __newval,
1325 memory_order_seq_cst,
1326 memory_order_relaxed))
1327 __newval = __oldval - __i;
1328 return __newval;
1329 }
1330 }
1331
1332#if __glibcxx_atomic_min_max
1333 template<typename _Tp>
1334 concept __atomic_fetch_minmaxable
1335 = requires (_Tp __t) {
1336 __atomic_fetch_min(&__t, __t, 0);
1337 __atomic_fetch_max(&__t, __t, 0);
1338 };
1339
1340 template<typename _Tp>
1341 _Tp
1342 __fetch_min(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1343 {
1344 if constexpr (__atomic_fetch_minmaxable<_Tp>)
1345 return __atomic_fetch_min(__ptr, __i, int(__m));
1346 else
1347 {
1348 _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1349 _Val<_Tp> __newval = __oldval < __i ? __oldval : __i;
1350 while (!compare_exchange_weak (__ptr, __oldval, __newval, __m,
1351 memory_order_relaxed))
1352 __newval = __oldval < __i ? __oldval : __i;
1353 return __oldval;
1354 }
1355 }
1356
1357 template<typename _Tp>
1358 _Tp
1359 __fetch_max(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1360 {
1361 if constexpr (__atomic_fetch_minmaxable<_Tp>)
1362 return __atomic_fetch_max(__ptr, __i, int(__m));
1363 else
1364 {
1365 _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1366 _Val<_Tp> __newval = __oldval > __i ? __oldval : __i;
1367 while (!compare_exchange_weak (__ptr, __oldval, __newval, __m,
1368 memory_order_relaxed))
1369 __newval = __oldval > __i ? __oldval : __i;
1370 return __oldval;
1371 }
1372 }
1373#endif
1374 } // namespace __atomic_impl
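All of the *_flt and min/max fallbacks above share one pattern: emulate an atomic read-modify-write with a compare_exchange_weak loop that retries with a refreshed old value. A generic sketch of that pattern using the public interface (fetch_update is a hypothetical name, not a library function):

#include <atomic>

template<typename T, typename F>
T fetch_update(std::atomic<T>& a, F op,
	       std::memory_order m = std::memory_order_seq_cst)
{
  T oldval = a.load(std::memory_order_relaxed);
  T newval = op(oldval);
  while (!a.compare_exchange_weak(oldval, newval, m,
				  std::memory_order_relaxed))
    newval = op(oldval);   // oldval was refreshed by the failed CAS
  return oldval;           // fetch_* semantics: return the previous value
}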
1375
1376 // base class for atomic<floating-point-type>
1377 template<typename _Fp>
1378 struct __atomic_float
1379 {
1380 static_assert(is_floating_point_v<_Fp>);
1381
1382 static constexpr size_t _S_alignment = __alignof__(_Fp);
1383
1384 public:
1385 using value_type = _Fp;
1386 using difference_type = value_type;
1387
1388 static constexpr bool is_always_lock_free
1389 = __atomic_always_lock_free(sizeof(_Fp), 0);
1390
1391 __atomic_float() = default;
1392
1393 constexpr
1394 __atomic_float(_Fp __t) : _M_fp(__t)
1395 { __atomic_impl::__clear_padding(_M_fp); }
1396
1397 __atomic_float(const __atomic_float&) = delete;
1398 __atomic_float& operator=(const __atomic_float&) = delete;
1399 __atomic_float& operator=(const __atomic_float&) volatile = delete;
1400
1401 _Fp
1402 operator=(_Fp __t) volatile noexcept
1403 {
1404 this->store(__t);
1405 return __t;
1406 }
1407
1408 _Fp
1409 operator=(_Fp __t) noexcept
1410 {
1411 this->store(__t);
1412 return __t;
1413 }
1414
1415 bool
1416 is_lock_free() const volatile noexcept
1417 { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1418
1419 bool
1420 is_lock_free() const noexcept
1421 { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1422
1423 void
1424 store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
1425 { __atomic_impl::store(&_M_fp, __t, __m); }
1426
1427 void
1428 store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
1429 { __atomic_impl::store(&_M_fp, __t, __m); }
1430
1431 _Fp
1432 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
1433 { return __atomic_impl::load(&_M_fp, __m); }
1434
1435 _Fp
1436 load(memory_order __m = memory_order_seq_cst) const noexcept
1437 { return __atomic_impl::load(&_M_fp, __m); }
1438
1439 operator _Fp() const volatile noexcept { return this->load(); }
1440 operator _Fp() const noexcept { return this->load(); }
1441
1442 _Fp
1443 exchange(_Fp __desired,
1444 memory_order __m = memory_order_seq_cst) volatile noexcept
1445 { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1446
1447 _Fp
1448 exchange(_Fp __desired,
1449 memory_order __m = memory_order_seq_cst) noexcept
1450 { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1451
1452 bool
1453 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1454 memory_order __success,
1455 memory_order __failure) noexcept
1456 {
1457 return __atomic_impl::compare_exchange_weak(&_M_fp,
1458 __expected, __desired,
1459 __success, __failure);
1460 }
1461
1462 bool
1463 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1464 memory_order __success,
1465 memory_order __failure) volatile noexcept
1466 {
1467 return __atomic_impl::compare_exchange_weak(&_M_fp,
1468 __expected, __desired,
1469 __success, __failure);
1470 }
1471
1472 bool
1473 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1474 memory_order __success,
1475 memory_order __failure) noexcept
1476 {
1477 return __atomic_impl::compare_exchange_strong(&_M_fp,
1478 __expected, __desired,
1479 __success, __failure);
1480 }
1481
1482 bool
1483 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1484 memory_order __success,
1485 memory_order __failure) volatile noexcept
1486 {
1487 return __atomic_impl::compare_exchange_strong(&_M_fp,
1488 __expected, __desired,
1489 __success, __failure);
1490 }
1491
1492 bool
1493 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1494 memory_order __order = memory_order_seq_cst)
1495 noexcept
1496 {
1497 return compare_exchange_weak(__expected, __desired, __order,
1498 __cmpexch_failure_order(__order));
1499 }
1500
1501 bool
1502 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1503 memory_order __order = memory_order_seq_cst)
1504 volatile noexcept
1505 {
1506 return compare_exchange_weak(__expected, __desired, __order,
1507 __cmpexch_failure_order(__order));
1508 }
1509
1510 bool
1511 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1512 memory_order __order = memory_order_seq_cst)
1513 noexcept
1514 {
1515 return compare_exchange_strong(__expected, __desired, __order,
1516 __cmpexch_failure_order(__order));
1517 }
1518
1519 bool
1520 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1521 memory_order __order = memory_order_seq_cst)
1522 volatile noexcept
1523 {
1524 return compare_exchange_strong(__expected, __desired, __order,
1525 __cmpexch_failure_order(__order));
1526 }
1527
1528#if __glibcxx_atomic_wait
1529 _GLIBCXX_ALWAYS_INLINE void
1530 wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
1531 { __atomic_impl::wait(&_M_fp, __old, __m); }
1532
1533 // TODO add const volatile overload
1534
1535 _GLIBCXX_ALWAYS_INLINE void
1536 notify_one() const noexcept
1537 { __atomic_impl::notify_one(&_M_fp); }
1538
1539 // TODO add const volatile overload
1540
1541 _GLIBCXX_ALWAYS_INLINE void
1542 notify_all() const noexcept
1543 { __atomic_impl::notify_all(&_M_fp); }
1544
1545 // TODO add const volatile overload
1546#endif // __glibcxx_atomic_wait
1547
1548 value_type
1549 fetch_add(value_type __i,
1550 memory_order __m = memory_order_seq_cst) noexcept
1551 { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1552
1553 value_type
1554 fetch_add(value_type __i,
1555 memory_order __m = memory_order_seq_cst) volatile noexcept
1556 { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1557
1558 value_type
1559 fetch_sub(value_type __i,
1560 memory_order __m = memory_order_seq_cst) noexcept
1561 { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1562
1563 value_type
1564 fetch_sub(value_type __i,
1565 memory_order __m = memory_order_seq_cst) volatile noexcept
1566 { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1567
1568#if __glibcxx_atomic_min_max
1569 value_type
1570 fetch_min(value_type __i,
1571 memory_order __m = memory_order_seq_cst) noexcept
1572 { return __atomic_impl::__fetch_min(&_M_fp, __i, __m); }
1573
1574 value_type
1575 fetch_min(value_type __i,
1576 memory_order __m = memory_order_seq_cst) volatile noexcept
1577 { return __atomic_impl::__fetch_min(&_M_fp, __i, __m); }
1578
1579 value_type
1580 fetch_max(value_type __i,
1581 memory_order __m = memory_order_seq_cst) noexcept
1582 { return __atomic_impl::__fetch_max(&_M_fp, __i, __m); }
1583
1584 value_type
1585 fetch_max(value_type __i,
1586 memory_order __m = memory_order_seq_cst) volatile noexcept
1587 { return __atomic_impl::__fetch_max(&_M_fp, __i, __m); }
1588#endif
1589
1590 value_type
1591 operator+=(value_type __i) noexcept
1592 { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1593
1594 value_type
1595 operator+=(value_type __i) volatile noexcept
1596 { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1597
1598 value_type
1599 operator-=(value_type __i) noexcept
1600 { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1601
1602 value_type
1603 operator-=(value_type __i) volatile noexcept
1604 { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1605
1606 private:
1607 alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
1608 };
1609#undef _GLIBCXX20_INIT
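__atomic_float backs std::atomic<float>, std::atomic<double> and std::atomic<long double> (C++20); where the target lacks a native atomic floating-point add, fetch_add and operator+= go through the CAS loops shown above. A sketch:

#include <atomic>
#include <cassert>

int main()
{
  std::atomic<double> acc{0.0};

  acc.fetch_add(1.5);   // native __atomic_fetch_add or the __fetch_add_flt loop
  acc += 2.5;           // operator+= uses the *_fetch_flt helpers
  assert(acc.load() == 4.0);   // 1.5 + 2.5 is exact in binary floating point
}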
1610
1611 // __atomic_ref_base<const _Tp> provides the common read-only APIs shared by
1612 // the const and non-const cases.
1613 // __atomic_ref_base<_Tp> inherits from __atomic_ref_base<const _Tp> and adds
1614 // the common mutating APIs, implementing the constraints in [atomic.ref].
1615 // __atomic_ref<_Tp> (for const or non-const _Tp) inherits from the matching
1616 // __atomic_ref_base and adds the type-specific APIs.
1617 // atomic_ref inherits from __atomic_ref.
1618
1619 template<typename _Tp>
1620 struct __atomic_ref_base;
1621
1622 template<typename _Tp>
1623 struct __atomic_ref_base<const _Tp>
1624 {
1625 private:
1626 using _Vt = remove_cv_t<_Tp>;
1627
1628 static consteval bool
1629 _S_is_always_lock_free()
1630 {
1631 if constexpr (is_pointer_v<_Vt>)
1632 return ATOMIC_POINTER_LOCK_FREE == 2;
1633 else
1634 return __atomic_always_lock_free(sizeof(_Vt), 0);
1635 }
1636
1637 static consteval int
1638 _S_required_alignment()
1639 {
1640 if constexpr (is_floating_point_v<_Vt> || is_pointer_v<_Vt>)
1641 return __alignof__(_Vt);
1642 else if constexpr ((sizeof(_Vt) & (sizeof(_Vt) - 1)) || sizeof(_Vt) > 16)
1643 return alignof(_Vt);
1644 else
1645 // 1/2/4/8/16-byte types, including integral types,
1646 // must be aligned to at least their size.
1647 return (sizeof(_Vt) > alignof(_Vt)) ? sizeof(_Vt) : alignof(_Vt);
1648 }
1649
1650 public:
1651 using value_type = _Vt;
1652 static_assert(is_trivially_copyable_v<value_type>);
1653
1654 static constexpr bool is_always_lock_free = _S_is_always_lock_free();
1655 static_assert(is_always_lock_free || !is_volatile_v<_Tp>,
1656 "atomic operations on volatile T must be lock-free");
1657
1658 static constexpr size_t required_alignment = _S_required_alignment();
1659
1660 __atomic_ref_base() = delete;
1661 __atomic_ref_base& operator=(const __atomic_ref_base&) = delete;
1662
1663 explicit
1664 __atomic_ref_base(const _Tp* __ptr) noexcept
1665 : _M_ptr(const_cast<_Tp*>(__ptr))
1666 { }
1667
1668 __atomic_ref_base(const __atomic_ref_base&) noexcept = default;
1669
1670 operator value_type() const noexcept { return this->load(); }
1671
1672 bool
1673 is_lock_free() const noexcept
1674 { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }
1675
1676 value_type
1677 load(memory_order __m = memory_order_seq_cst) const noexcept
1678 { return __atomic_impl::load(_M_ptr, __m); }
1679
1680#if __glibcxx_atomic_wait
1681 _GLIBCXX_ALWAYS_INLINE void
1682 wait(value_type __old, memory_order __m = memory_order_seq_cst) const noexcept
1683 {
1684 // TODO remove when volatile is supported
1685 static_assert(!is_volatile_v<_Tp>,
1686 "atomic waits on volatile are not supported");
1687 __atomic_impl::wait(_M_ptr, __old, __m);
1688 }
1689#endif // __glibcxx_atomic_wait
1690
1691#if __glibcxx_atomic_ref >= 202411L
1692 _GLIBCXX_ALWAYS_INLINE constexpr const _Tp*
1693 address() const noexcept
1694 { return _M_ptr; }
1695#endif // __glibcxx_atomic_ref >= 202411L
1696
1697 protected:
1698 _Tp* _M_ptr;
1699 };
1700
1701 template<typename _Tp>
1702 struct __atomic_ref_base
1703 : __atomic_ref_base<const _Tp>
1704 {
1705 using value_type = typename __atomic_ref_base<const _Tp>::value_type;
1706
1707 explicit
1708 __atomic_ref_base(_Tp* __ptr) noexcept
1709 : __atomic_ref_base<const _Tp>(__ptr)
1710 { }
1711
1712 value_type
1713 operator=(value_type __t) const noexcept
1714 {
1715 this->store(__t);
1716 return __t;
1717 }
1718
1719 void
1720 store(value_type __t, memory_order __m = memory_order_seq_cst) const noexcept
1721 { __atomic_impl::store(this->_M_ptr, __t, __m); }
1722
1723 value_type
1724 exchange(value_type __desired, memory_order __m = memory_order_seq_cst)
1725 const noexcept
1726 { return __atomic_impl::exchange(this->_M_ptr, __desired, __m); }
1727
1728 bool
1729 compare_exchange_weak(value_type& __expected, value_type __desired,
1730 memory_order __success,
1731 memory_order __failure) const noexcept
1732 {
1733 return __atomic_impl::compare_exchange_weak<true>(
1734 this->_M_ptr, __expected, __desired, __success, __failure);
1735 }
1736
1737 bool
1738 compare_exchange_strong(value_type& __expected, value_type __desired,
1739 memory_order __success,
1740 memory_order __failure) const noexcept
1741 {
1742 return __atomic_impl::compare_exchange_strong<true>(
1743 this->_M_ptr, __expected, __desired, __success, __failure);
1744 }
1745
1746 bool
1747 compare_exchange_weak(value_type& __expected, value_type __desired,
1748 memory_order __order = memory_order_seq_cst)
1749 const noexcept
1750 {
1751 return compare_exchange_weak(__expected, __desired, __order,
1752 __cmpexch_failure_order(__order));
1753 }
1754
1755 bool
1756 compare_exchange_strong(value_type& __expected, value_type __desired,
1757 memory_order __order = memory_order_seq_cst)
1758 const noexcept
1759 {
1760 return compare_exchange_strong(__expected, __desired, __order,
1761 __cmpexch_failure_order(__order));
1762 }
1763
1764#if __glibcxx_atomic_wait
1765 _GLIBCXX_ALWAYS_INLINE void
1766 notify_one() const noexcept
1767 {
1768 // TODO remove when volatile is supported
1769 static_assert(!is_volatile_v<_Tp>,
1770 "atomic waits on volatile are not supported");
1771 __atomic_impl::notify_one(this->_M_ptr);
1772 }
1773
1774 _GLIBCXX_ALWAYS_INLINE void
1775 notify_all() const noexcept
1776 {
1777 // TODO remove when volatile is supported
1778 static_assert(!is_volatile_v<_Tp>,
1779 "atomic waits on volatile are not supported");
1780 __atomic_impl::notify_all(this->_M_ptr);
1781 }
1782#endif // __glibcxx_atomic_wait
1783
1784#if __glibcxx_atomic_ref >= 202411L
1785 _GLIBCXX_ALWAYS_INLINE constexpr _Tp*
1786 address() const noexcept
1787 { return this->_M_ptr; }
1788#endif // __glibcxx_atomic_ref >= 202411L
1789 };
1790
1791 template<typename _Tp,
1792 bool = is_integral_v<_Tp> && !is_same_v<remove_cv_t<_Tp>, bool>,
1793 bool = is_floating_point_v<_Tp>,
1794 bool = is_pointer_v<_Tp>>
1795 struct __atomic_ref;
1796
1797 // base class for non-integral, non-floating-point, non-pointer types
1798 template<typename _Tp>
1799 struct __atomic_ref<_Tp, false, false, false>
1800 : __atomic_ref_base<_Tp>
1801 {
1802 using __atomic_ref_base<_Tp>::__atomic_ref_base;
1803 using __atomic_ref_base<_Tp>::operator=;
1804 };
1805
1806 template<typename _Tp>
1807 struct __atomic_ref<const _Tp, false, false, false>
1808 : __atomic_ref_base<const _Tp>
1809 {
1810 using __atomic_ref_base<const _Tp>::__atomic_ref_base;
1811 };
1812
1813 // base class for atomic_ref<integral-type>
1814 template<typename _Tp>
1815 struct __atomic_ref<_Tp, true, false, false>
1816 : __atomic_ref_base<_Tp>
1817 {
1818 using value_type = typename __atomic_ref_base<_Tp>::value_type;
1819 using difference_type = value_type;
1820
1821 using __atomic_ref_base<_Tp>::__atomic_ref_base;
1822 using __atomic_ref_base<_Tp>::operator=;
1823
1824 value_type
1825 fetch_add(value_type __i,
1826 memory_order __m = memory_order_seq_cst) const noexcept
1827 { return __atomic_impl::fetch_add(this->_M_ptr, __i, __m); }
1828
1829 value_type
1830 fetch_sub(value_type __i,
1831 memory_order __m = memory_order_seq_cst) const noexcept
1832 { return __atomic_impl::fetch_sub(this->_M_ptr, __i, __m); }
1833
1834 value_type
1835 fetch_and(value_type __i,
1836 memory_order __m = memory_order_seq_cst) const noexcept
1837 { return __atomic_impl::fetch_and(this->_M_ptr, __i, __m); }
1838
1839 value_type
1840 fetch_or(value_type __i,
1841 memory_order __m = memory_order_seq_cst) const noexcept
1842 { return __atomic_impl::fetch_or(this->_M_ptr, __i, __m); }
1843
1844 value_type
1845 fetch_xor(value_type __i,
1846 memory_order __m = memory_order_seq_cst) const noexcept
1847 { return __atomic_impl::fetch_xor(this->_M_ptr, __i, __m); }
1848
1849#if __glibcxx_atomic_min_max
1850 value_type
1851 fetch_min(value_type __i,
1852 memory_order __m = memory_order_seq_cst) const noexcept
1853 { return __atomic_impl::__fetch_min(this->_M_ptr, __i, __m); }
1854
1855 value_type
1856 fetch_max(value_type __i,
1857 memory_order __m = memory_order_seq_cst) const noexcept
1858 { return __atomic_impl::__fetch_max(this->_M_ptr, __i, __m); }
1859#endif
1860
1861 _GLIBCXX_ALWAYS_INLINE value_type
1862 operator++(int) const noexcept
1863 { return fetch_add(1); }
1864
1865 _GLIBCXX_ALWAYS_INLINE value_type
1866 operator--(int) const noexcept
1867 { return fetch_sub(1); }
1868
1869 value_type
1870 operator++() const noexcept
1871 { return __atomic_impl::__add_fetch(this->_M_ptr, value_type(1)); }
1872
1873 value_type
1874 operator--() const noexcept
1875 { return __atomic_impl::__sub_fetch(this->_M_ptr, value_type(1)); }
1876
1877 value_type
1878 operator+=(value_type __i) const noexcept
1879 { return __atomic_impl::__add_fetch(this->_M_ptr, __i); }
1880
1881 value_type
1882 operator-=(value_type __i) const noexcept
1883 { return __atomic_impl::__sub_fetch(this->_M_ptr, __i); }
1884
1885 value_type
1886 operator&=(value_type __i) const noexcept
1887 { return __atomic_impl::__and_fetch(this->_M_ptr, __i); }
1888
1889 value_type
1890 operator|=(value_type __i) const noexcept
1891 { return __atomic_impl::__or_fetch(this->_M_ptr, __i); }
1892
1893 value_type
1894 operator^=(value_type __i) const noexcept
1895 { return __atomic_impl::__xor_fetch(this->_M_ptr, __i); }
1896 };
1897
1898 template<typename _Tp>
1899 struct __atomic_ref<const _Tp, true, false, false>
1900 : __atomic_ref_base<const _Tp>
1901 {
1902 using difference_type = typename __atomic_ref_base<const _Tp>::value_type;
1903 using __atomic_ref_base<const _Tp>::__atomic_ref_base;
1904 };
1905
1906 // base class for atomic_ref<floating-point-type>
1907 template<typename _Fp>
1908 struct __atomic_ref<_Fp, false, true, false>
1909 : __atomic_ref_base<_Fp>
1910 {
1911 using value_type = typename __atomic_ref_base<_Fp>::value_type;
1912 using difference_type = value_type;
1913
1914 using __atomic_ref_base<_Fp>::__atomic_ref_base;
1915 using __atomic_ref_base<_Fp>::operator=;
1916
1917 value_type
1918 fetch_add(value_type __i,
1919 memory_order __m = memory_order_seq_cst) const noexcept
1920 { return __atomic_impl::__fetch_add_flt(this->_M_ptr, __i, __m); }
1921
1922 value_type
1923 fetch_sub(value_type __i,
1924 memory_order __m = memory_order_seq_cst) const noexcept
1925 { return __atomic_impl::__fetch_sub_flt(this->_M_ptr, __i, __m); }
1926
1927#if __glibcxx_atomic_min_max
1928 value_type
1929 fetch_min(value_type __i,
1930 memory_order __m = memory_order_seq_cst) const noexcept
1931 { return __atomic_impl::__fetch_min(this->_M_ptr, __i, __m); }
1932
1933 value_type
1934 fetch_max(value_type __i,
1935 memory_order __m = memory_order_seq_cst) const noexcept
1936 { return __atomic_impl::__fetch_max(this->_M_ptr, __i, __m); }
1937#endif
1938
1939 value_type
1940 operator+=(value_type __i) const noexcept
1941 { return __atomic_impl::__add_fetch_flt(this->_M_ptr, __i); }
1942
1943 value_type
1944 operator-=(value_type __i) const noexcept
1945 { return __atomic_impl::__sub_fetch_flt(this->_M_ptr, __i); }
1946 };
1947
1948 template<typename _Fp>
1949 struct __atomic_ref<const _Fp, false, true, false>
1950 : __atomic_ref_base<const _Fp>
1951 {
1952 using difference_type = typename __atomic_ref_base<const _Fp>::value_type;
1953 using __atomic_ref_base<const _Fp>::__atomic_ref_base;
1954 };
1955
1956 // base class for atomic_ref<pointer-type>
1957 template<typename _Pt>
1958 struct __atomic_ref<_Pt, false, false, true>
1959 : __atomic_ref_base<_Pt>
1960 {
1961 using value_type = typename __atomic_ref_base<_Pt>::value_type;
1962 using difference_type = ptrdiff_t;
1963
1964 using __atomic_ref_base<_Pt>::__atomic_ref_base;
1965 using __atomic_ref_base<_Pt>::operator=;
1966 _GLIBCXX_ALWAYS_INLINE value_type
1967 fetch_add(difference_type __d,
1968 memory_order __m = memory_order_seq_cst) const noexcept
1969 { return __atomic_impl::fetch_add(this->_M_ptr, _S_type_size(__d), __m); }
1970
1971 _GLIBCXX_ALWAYS_INLINE value_type
1972 fetch_sub(difference_type __d,
1973 memory_order __m = memory_order_seq_cst) const noexcept
1974 { return __atomic_impl::fetch_sub(this->_M_ptr, _S_type_size(__d), __m); }
1975
1976 value_type
1977 operator++(int) const noexcept
1978 { return fetch_add(1); }
1979
1980 value_type
1981 operator--(int) const noexcept
1982 { return fetch_sub(1); }
1983
1984 value_type
1985 operator++() const noexcept
1986 {
1987 return __atomic_impl::__add_fetch(this->_M_ptr, _S_type_size(1));
1988 }
1989
1990 value_type
1991 operator--() const noexcept
1992 {
1993 return __atomic_impl::__sub_fetch(this->_M_ptr, _S_type_size(1));
1994 }
1995
1996 value_type
1997 operator+=(difference_type __d) const noexcept
1998 {
1999 return __atomic_impl::__add_fetch(this->_M_ptr, _S_type_size(__d));
2000 }
2001
2002 value_type
2003 operator-=(difference_type __d) const noexcept
2004 {
2005 return __atomic_impl::__sub_fetch(this->_M_ptr, _S_type_size(__d));
2006 }
2007
2008 private:
2009 static constexpr ptrdiff_t
2010 _S_type_size(ptrdiff_t __d) noexcept
2011 {
2012 using _Et = remove_pointer_t<value_type>;
2013 static_assert(is_object_v<_Et>);
2014 return __d * sizeof(_Et);
2015 }
2016 };
2017
2018 template<typename _Pt>
2019 struct __atomic_ref<const _Pt, false, false, true>
2020 : __atomic_ref_base<const _Pt>
2021 {
2022 using difference_type = ptrdiff_t;
2023 using __atomic_ref_base<const _Pt>::__atomic_ref_base;
2024 };
2025#endif // C++2a
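These __atomic_ref specializations are the machinery behind std::atomic_ref (C++20), which gives atomic access to an ordinary object it does not own. A sketch; note that while any atomic_ref to an object exists, all access to that object should go through atomic_ref instances:

#include <atomic>
#include <cassert>

int main()
{
  alignas(std::atomic_ref<long>::required_alignment) long value = 0;

  {
    std::atomic_ref<long> ref(value);   // non-owning atomic view of 'value'
    ref.fetch_add(5);
    ref += 1;
  }
  assert(value == 6);   // safe to touch directly again once the ref is gone
}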
2026
2027 /// @endcond
2028
2029 /// @} group atomics
2030
2031_GLIBCXX_END_NAMESPACE_VERSION
2032} // namespace std
2033
2034#endif