libstdc++
atomicity.h
// Support for atomic operations -*- C++ -*-

// Copyright (C) 2004-2025 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file ext/atomicity.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _GLIBCXX_ATOMICITY_H
#define _GLIBCXX_ATOMICITY_H 1

#ifdef _GLIBCXX_SYSHDR
#pragma GCC system_header
#endif

#include <bits/c++config.h>
#include <bits/gthr.h>
#include <bits/atomic_word.h>
#if __has_include(<sys/single_threaded.h>)
# include <sys/single_threaded.h>
#endif
#if __cplusplus >= 201103L
# include <type_traits> // make_unsigned_t
#endif

namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

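  // Return true if we can assume the program is single-threaded:
  // either this build has no thread support at all, or the runtime
  // reports that no other threads are active.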
  __attribute__((__always_inline__))
  inline bool
  __is_single_threaded() _GLIBCXX_NOTHROW
  {
#ifndef __GTHREADS
    return true;
#elif __has_include(<sys/single_threaded.h>)
    return ::__libc_single_threaded;
#else
    return !__gthread_active_p();
#endif
  }

  // Functions for portable atomic access.
  // To abstract locking primitives across all thread policies, use:
  // __exchange_and_add_dispatch
  // __atomic_add_dispatch
#ifdef _GLIBCXX_ATOMIC_WORD_BUILTINS
  inline _Atomic_word
  __attribute__((__always_inline__))
  __exchange_and_add(volatile _Atomic_word* __mem, int __val)
  { return __atomic_fetch_add(__mem, __val, __ATOMIC_ACQ_REL); }

  inline void
  __attribute__((__always_inline__))
  __atomic_add(volatile _Atomic_word* __mem, int __val)
  { __atomic_fetch_add(__mem, __val, __ATOMIC_ACQ_REL); }
#else // Defined in config/cpu/.../atomicity.h
  _Atomic_word
  __exchange_and_add(volatile _Atomic_word*, int) _GLIBCXX_NOTHROW;

  void
  __atomic_add(volatile _Atomic_word*, int) _GLIBCXX_NOTHROW;
#endif

#if __cplusplus < 201103L
  // The array bound will be ill-formed in the very unlikely case that
  // _Atomic_word is wider than long and we need to use unsigned long long
  // below in __exchange_and_add_single and __atomic_add_single.
  typedef int
    _Atomic_word_fits_in_long[sizeof(_Atomic_word) <= sizeof(long) ? 1 : -1];
#endif

  inline _Atomic_word
  __attribute__((__always_inline__))
  __exchange_and_add_single(_Atomic_word* __mem, int __val)
  {
    _Atomic_word __result = *__mem;
    // Do the addition with an unsigned type so that overflow is well defined.
#if __cplusplus >= 201103L
    std::make_unsigned<_Atomic_word>::type __u;
#else
    // For most targets make_unsigned_t<_Atomic_word> is unsigned int,
    // but 64-bit sparc uses long for _Atomic_word.
    // Sign-extending to unsigned long works for both cases.
    unsigned long __u;
#endif
    __u = __result;
    __u += __val;
    *__mem = __u;
    return __result;
  }

  inline void
  __attribute__((__always_inline__))
  __atomic_add_single(_Atomic_word* __mem, int __val)
  {
#if __cplusplus >= 201103L
    std::make_unsigned<_Atomic_word>::type __u;
#else
    unsigned long __u; // see above
#endif
    __u = *__mem;
    __u += __val;
    *__mem = __u;
  }

  inline _Atomic_word
  __attribute__ ((__always_inline__))
  __exchange_and_add_dispatch(_Atomic_word* __mem, int __val)
  {
    if (__is_single_threaded())
      return __exchange_and_add_single(__mem, __val);
    else
      return __exchange_and_add(__mem, __val);
  }

  inline void
  __attribute__ ((__always_inline__))
  __atomic_add_dispatch(_Atomic_word* __mem, int __val)
  {
    if (__is_single_threaded())
      __atomic_add_single(__mem, __val);
    else
      __atomic_add(__mem, __val);
  }

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

// Even if the CPU doesn't need a memory barrier, we need to ensure
// that the compiler doesn't reorder memory accesses across the
// barriers.
#ifndef _GLIBCXX_READ_MEM_BARRIER
#define _GLIBCXX_READ_MEM_BARRIER __atomic_thread_fence (__ATOMIC_ACQUIRE)
#endif
#ifndef _GLIBCXX_WRITE_MEM_BARRIER
#define _GLIBCXX_WRITE_MEM_BARRIER __atomic_thread_fence (__ATOMIC_RELEASE)
#endif
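
// Typical usage (illustrative, not part of this header): a writer stores
// its data, issues _GLIBCXX_WRITE_MEM_BARRIER, then sets a "ready" flag;
// a reader that sees the flag set issues _GLIBCXX_READ_MEM_BARRIER before
// reading the data, so those reads are not reordered ahead of the flag check.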

#endif
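
The two dispatch functions, __exchange_and_add_dispatch and __atomic_add_dispatch, are the building blocks libstdc++ itself uses for reference counting (for example in std::shared_ptr's control block and the reference-counted copy-on-write string), because they fall back to the cheap non-atomic *_single versions whenever __is_single_threaded() returns true. The sketch below is illustrative only: the class and member names are invented for this example and are not part of the library. It assumes the common convention that the count starts at 1 for the initial owner, and relies on __exchange_and_add_dispatch returning the value the counter held before the addition.

#include <ext/atomicity.h>

// A minimal intrusive reference count built on the dispatch helpers.
class ref_counted_base
{
  _Atomic_word m_count; // starts at 1: the creator holds the first reference

public:
  ref_counted_base() : m_count(1) { }

  // Take an additional reference.
  void
  add_ref()
  { __gnu_cxx::__atomic_add_dispatch(&m_count, 1); }

  // Drop a reference; an old value of 1 means this call released the last one.
  void
  release()
  {
    if (__gnu_cxx::__exchange_and_add_dispatch(&m_count, -1) == 1)
      delete this;
  }

protected:
  virtual ~ref_counted_base() { }
};

Dispatching on __is_single_threaded() is the point of these helpers: a plain load/add/store is cheaper than a locked read-modify-write, and a program that never starts a second thread cannot tell the difference.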